//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
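  // For example, extracting the 128-bit chunk that holds element 5 of a
  // 256-bit v8i32 gives NormalizedIdxVal == 4, i.e. the first element of the
  // upper 128-bit half.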
  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();
  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
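  // For example, a v4i32 SETCC materializes each lane as 0 or 0xFFFFFFFF,
  // which is exactly what PCMPEQD/PCMPGTD and CMPPS produce.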
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
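  // e.g. addBypassSlowDiv(64, 16) emits a run-time check so that a 64-bit
  // divide whose operands actually fit in 16 bits is dispatched to the much
  // cheaper 16-bit divide instead.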
  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }
  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
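  // UCOMISS/UCOMISD set ZF for both "equal" and "unordered", so an ordered
  // compare-equal also has to check PF; Expand lets the generic legalizer
  // split these into two flag tests combined with AND/OR.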
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }
  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }
  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
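  // With SDIV and SREM both Expanded, "x / y" and "x % y" are rewritten to a
  // shared SDIVREM node, so a single IDIV produces both results.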
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }
  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
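  // Without LZCNT/TZCNT the lowering falls back on BSR/BSF, which leave the
  // destination undefined for a zero input, so the zero case has to be
  // handled explicitly in the Custom expansion.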
  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle specially.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }
    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
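  // None of these MMX-sized integer types has a register class here, so the
  // type legalizer widens or scalarizes them before operation legalization.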
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }
    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
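    // These rounding operations map directly onto the SSE4.1 ROUNDSS/ROUNDSD
    // and ROUNDPS/ROUNDPD instructions with the appropriate immediate.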
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }
    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
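    // For example, a zero-extending load of 4 x i8 into v4i32 can now be
    // selected as a single PMOVZXBD from memory instead of a scalar load plus
    // shuffles.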
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1257 if (Subtarget->hasInt256()) {
1258 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1259 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1260 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1261 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1263 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1264 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1265 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1266 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1268 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1269 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1270 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1271 // Don't lower v32i8 because there is no 128-bit byte mul
1273 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1274 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1275 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1276 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1278 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1279 // when we have a 256bit-wide blend with immediate.
1280 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1282 // Only provide customized ctpop vector bit twiddling for vector types we
1283 // know to perform better than using the popcnt instructions on each
1284 // vector element. If popcnt isn't supported, always provide the custom
1286 if (!Subtarget->hasPOPCNT())
1287 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1289 // Custom CTPOP always performs better on natively supported v8i32
1290 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1292 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1293 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1294 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1295 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1296 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1297 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1298 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1300 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1301 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1302 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1303 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1304 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1305 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
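// Marking these extending loads legal lets the extend be folded into the load
// itself; e.g. a zero-extending load from <4 x i32> to <4 x i64> can be
// selected as a single VPMOVZXDQ with a memory operand on AVX2.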
1307 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1308 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1309 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1310 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1312 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1313 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1314 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1315 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1317 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1318 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1319 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1320 // Don't lower v32i8 because there is no 128-bit byte mul
1323 // In the customized shift lowering, the legal cases in AVX2 will be
1324 // recognized.
1325 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1326 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1328 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1329 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1331 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1333 // Custom lower several nodes for 256-bit types.
1334 for (MVT VT : MVT::vector_valuetypes()) {
1335 if (VT.getScalarSizeInBits() >= 32) {
1336 setOperationAction(ISD::MLOAD, VT, Legal);
1337 setOperationAction(ISD::MSTORE, VT, Legal);
1339 // Extract subvector is special because the value type
1340 // (result) is 128-bit but the source is 256-bit wide.
1341 if (VT.is128BitVector()) {
1342 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1344 // Do not attempt to custom lower other non-256-bit vectors
1345 if (!VT.is256BitVector())
1348 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1349 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1350 setOperationAction(ISD::VSELECT, VT, Custom);
1351 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1352 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1353 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1354 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1355 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1358 if (Subtarget->hasInt256())
1359 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1362 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1363 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1364 MVT VT = (MVT::SimpleValueType)i;
1366 // Do not attempt to promote non-256-bit vectors
1367 if (!VT.is256BitVector())
1370 setOperationAction(ISD::AND, VT, Promote);
1371 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1372 setOperationAction(ISD::OR, VT, Promote);
1373 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1374 setOperationAction(ISD::XOR, VT, Promote);
1375 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1376 setOperationAction(ISD::LOAD, VT, Promote);
1377 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1378 setOperationAction(ISD::SELECT, VT, Promote);
1379 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
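// Promotion here is effectively just a bitcast: bitwise ops are element-size
// agnostic, so e.g. an AND of two v8i32 values is bitcast to v4i64 and
// selected as a single 256-bit VPAND.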
1383 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1384 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1385 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1386 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1387 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1389 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1390 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1391 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1393 for (MVT VT : MVT::fp_vector_valuetypes())
1394 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1396 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1397 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1398 setOperationAction(ISD::XOR, MVT::i1, Legal);
1399 setOperationAction(ISD::OR, MVT::i1, Legal);
1400 setOperationAction(ISD::AND, MVT::i1, Legal);
1401 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1402 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1403 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1404 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1405 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1407 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1408 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1409 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1410 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1411 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1412 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1414 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1415 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1416 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1417 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1418 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1419 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1420 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1421 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1423 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1424 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1425 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1426 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1427 if (Subtarget->is64Bit()) {
1428 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1429 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1430 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1431 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1433 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1434 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1435 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1436 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1437 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1438 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1439 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1440 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1443 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1444 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1445 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1446 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1448 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1449 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1450 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1451 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1452 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1453 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1454 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1455 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1456 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1457 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1458 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1459 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1460 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
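// AVX-512 has dedicated narrowing/widening moves, so these custom lowerings
// can usually map straight onto them; e.g. a v16i32 -> v16i8 truncate can use
// VPMOVDB, and a v16i8 -> v16i32 sign extend can use VPMOVSXBD.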
1462 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1463 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1464 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1465 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1466 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1467 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1468 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1469 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1470 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1471 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1473 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1474 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1475 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1476 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1477 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1480 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1481 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1483 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1485 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1486 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1487 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1488 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1489 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1490 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1491 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1492 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1493 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1495 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1496 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1498 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1499 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1501 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1504 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1506 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1507 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1509 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1510 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1512 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1513 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1514 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1515 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1516 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1517 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1519 if (Subtarget->hasCDI()) {
1520 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1521 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
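// With the AVX-512 Conflict Detection extension, vector count-leading-zeros is
// a single instruction (VPLZCNTD / VPLZCNTQ), hence Legal rather than Custom.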
1524 // Custom lower several nodes.
1525 for (MVT VT : MVT::vector_valuetypes()) {
1526 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1527 // Extract subvector is special because the value type
1528 // (result) is 256/128-bit but the source is 512-bit wide.
1529 if (VT.is128BitVector() || VT.is256BitVector()) {
1530 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1532 if (VT.getVectorElementType() == MVT::i1)
1533 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1535 // Do not attempt to custom lower other non-512-bit vectors
1536 if (!VT.is512BitVector())
1539 if (EltSize >= 32) {
1540 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1541 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1542 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1543 setOperationAction(ISD::VSELECT, VT, Legal);
1544 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1545 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1546 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1547 setOperationAction(ISD::MLOAD, VT, Legal);
1548 setOperationAction(ISD::MSTORE, VT, Legal);
1551 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1552 MVT VT = (MVT::SimpleValueType)i;
1554 // Do not attempt to promote non-512-bit vectors.
1555 if (!VT.is512BitVector())
1558 setOperationAction(ISD::SELECT, VT, Promote);
1559 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1563 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1564 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1565 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1567 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1568 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1570 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1571 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1572 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1573 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1574 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1575 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1576 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1577 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1578 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1580 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1581 const MVT VT = (MVT::SimpleValueType)i;
1583 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1585 // Do not attempt to promote non-512-bit vectors.
1586 if (!VT.is512BitVector())
1590 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1591 setOperationAction(ISD::VSELECT, VT, Legal);
1596 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1597 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1598 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1600 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1601 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1602 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1604 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1605 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1606 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1607 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1608 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1609 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1612 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1613 // of this type with custom code.
1614 for (MVT VT : MVT::vector_valuetypes())
1615 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1617 // We want to custom lower some of our intrinsics.
1618 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1619 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1620 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1621 if (!Subtarget->is64Bit())
1622 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1624 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1625 // handle type legalization for these operations here.
1627 // FIXME: We really should do custom legalization for addition and
1628 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1629 // than generic legalization for 64-bit multiplication-with-overflow, though.
1630 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1631 // Add/Sub/Mul with overflow operations are custom lowered.
1633 setOperationAction(ISD::SADDO, VT, Custom);
1634 setOperationAction(ISD::UADDO, VT, Custom);
1635 setOperationAction(ISD::SSUBO, VT, Custom);
1636 setOperationAction(ISD::USUBO, VT, Custom);
1637 setOperationAction(ISD::SMULO, VT, Custom);
1638 setOperationAction(ISD::UMULO, VT, Custom);
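// For example, llvm.sadd.with.overflow.i32 is lowered to an ADD that also
// defines EFLAGS, plus a SETO (or a branch on the overflow flag) to produce
// the i1 overflow result; the unsigned variants test the carry flag instead.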
1642 if (!Subtarget->is64Bit()) {
1643 // These libcalls are not available in 32-bit.
1644 setLibcallName(RTLIB::SHL_I128, nullptr);
1645 setLibcallName(RTLIB::SRL_I128, nullptr);
1646 setLibcallName(RTLIB::SRA_I128, nullptr);
1649 // Combine sin / cos into one node or libcall if possible.
1650 if (Subtarget->hasSinCos()) {
1651 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1652 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1653 if (Subtarget->isTargetDarwin()) {
1654 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1655 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1656 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1657 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
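// For example, computing both sinf(x) and cosf(x) is then emitted as one
// __sincos_stret call that returns both results (in registers on x86-64),
// rather than the out-parameter form of sincosf that goes through memory.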
1661 if (Subtarget->isTargetWin64()) {
1662 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1663 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1664 setOperationAction(ISD::SREM, MVT::i128, Custom);
1665 setOperationAction(ISD::UREM, MVT::i128, Custom);
1666 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1667 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
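// Win64 has no native 128-bit integer division, so these are lowered to calls
// into the compiler runtime (the __divti3 / __udivti3 / __modti3 / __umodti3
// family); custom lowering is needed because i128 values cannot be passed to
// those calls through the normal Win64 argument registers.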
1670 // We have target-specific dag combine patterns for the following nodes:
1671 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1672 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1673 setTargetDAGCombine(ISD::BITCAST);
1674 setTargetDAGCombine(ISD::VSELECT);
1675 setTargetDAGCombine(ISD::SELECT);
1676 setTargetDAGCombine(ISD::SHL);
1677 setTargetDAGCombine(ISD::SRA);
1678 setTargetDAGCombine(ISD::SRL);
1679 setTargetDAGCombine(ISD::OR);
1680 setTargetDAGCombine(ISD::AND);
1681 setTargetDAGCombine(ISD::ADD);
1682 setTargetDAGCombine(ISD::FADD);
1683 setTargetDAGCombine(ISD::FSUB);
1684 setTargetDAGCombine(ISD::FMA);
1685 setTargetDAGCombine(ISD::SUB);
1686 setTargetDAGCombine(ISD::LOAD);
1687 setTargetDAGCombine(ISD::MLOAD);
1688 setTargetDAGCombine(ISD::STORE);
1689 setTargetDAGCombine(ISD::MSTORE);
1690 setTargetDAGCombine(ISD::ZERO_EXTEND);
1691 setTargetDAGCombine(ISD::ANY_EXTEND);
1692 setTargetDAGCombine(ISD::SIGN_EXTEND);
1693 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1694 setTargetDAGCombine(ISD::TRUNCATE);
1695 setTargetDAGCombine(ISD::SINT_TO_FP);
1696 setTargetDAGCombine(ISD::SETCC);
1697 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1698 setTargetDAGCombine(ISD::BUILD_VECTOR);
1699 setTargetDAGCombine(ISD::MUL);
1700 setTargetDAGCombine(ISD::XOR);
1702 computeRegisterProperties();
1704 // On Darwin, -Os means optimize for size without hurting performance, so
1705 // do not reduce the limit.
1706 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1707 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1708 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1709 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1710 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1711 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
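// These limits bound how many inline stores may replace a small memory
// intrinsic before falling back to a library call; e.g. with
// MaxStoresPerMemset = 16, a 64-byte @llvm.memset can be inlined as four
// 16-byte stores (depending on the store type getOptimalMemOpType picks).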
1712 setPrefLoopAlignment(4); // 2^4 bytes.
1714 // Predictable cmovs don't hurt on Atom because it's in-order.
1715 PredictableSelectIsExpensive = !Subtarget->isAtom();
1716 EnableExtLdPromotion = true;
1717 setPrefFunctionAlignment(4); // 2^4 bytes.
1719 verifyIntrinsicTables();
1722 // This has so far only been implemented for 64-bit MachO.
1723 bool X86TargetLowering::useLoadStackGuardNode() const {
1724 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1727 TargetLoweringBase::LegalizeTypeAction
1728 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1729 if (ExperimentalVectorWideningLegalization &&
1730 VT.getVectorNumElements() != 1 &&
1731 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1732 return TypeWidenVector;
1734 return TargetLoweringBase::getPreferredVectorAction(VT);
1737 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1739 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1741 const unsigned NumElts = VT.getVectorNumElements();
1742 const EVT EltVT = VT.getVectorElementType();
1743 if (VT.is512BitVector()) {
1744 if (Subtarget->hasAVX512())
1745 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1746 EltVT == MVT::f32 || EltVT == MVT::f64)
1748 case 8: return MVT::v8i1;
1749 case 16: return MVT::v16i1;
1751 if (Subtarget->hasBWI())
1752 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1754 case 32: return MVT::v32i1;
1755 case 64: return MVT::v64i1;
1759 if (VT.is256BitVector() || VT.is128BitVector()) {
1760 if (Subtarget->hasVLX())
1761 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1762 EltVT == MVT::f32 || EltVT == MVT::f64)
1764 case 2: return MVT::v2i1;
1765 case 4: return MVT::v4i1;
1766 case 8: return MVT::v8i1;
1768 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1769 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1771 case 8: return MVT::v8i1;
1772 case 16: return MVT::v16i1;
1773 case 32: return MVT::v32i1;
1777 return VT.changeVectorElementTypeToInteger();
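// Without AVX-512 mask registers the fallback above applies: e.g. a setcc on
// v4f32 produces a v4i32 lane mask (all-ones or all-zeros per element). With
// AVX-512 (and VLX for the narrower widths) the cases above return the i1
// mask vector types instead.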
1780 /// Helper for getByValTypeAlignment to determine
1781 /// the desired ByVal argument alignment.
1782 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1785 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1786 if (VTy->getBitWidth() == 128)
1788 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1789 unsigned EltAlign = 0;
1790 getMaxByValAlign(ATy->getElementType(), EltAlign);
1791 if (EltAlign > MaxAlign)
1792 MaxAlign = EltAlign;
1793 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1794 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1795 unsigned EltAlign = 0;
1796 getMaxByValAlign(STy->getElementType(i), EltAlign);
1797 if (EltAlign > MaxAlign)
1798 MaxAlign = EltAlign;
1805 /// Return the desired alignment for ByVal aggregate
1806 /// function arguments in the caller parameter area. For X86, aggregates
1807 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1808 /// are at 4-byte boundaries.
1809 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1810 if (Subtarget->is64Bit()) {
1811 // Max of 8 and alignment of type.
1812 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1819 if (Subtarget->hasSSE1())
1820 getMaxByValAlign(Ty, Align);
1824 /// Returns the target specific optimal type for load
1825 /// and store operations as a result of memset, memcpy, and memmove
1826 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1827 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
1828 /// against the alignment requirement, probably because the source does not
1829 /// need to be loaded. If 'IsMemset' is
1830 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1831 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1832 /// source is constant so it does not need to be loaded.
1833 /// It returns EVT::Other if the type should be determined using generic
1834 /// target-independent logic.
1836 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1837 unsigned DstAlign, unsigned SrcAlign,
1838 bool IsMemset, bool ZeroMemset,
1840 MachineFunction &MF) const {
1841 const Function *F = MF.getFunction();
1842 if ((!IsMemset || ZeroMemset) &&
1843 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1845 (Subtarget->isUnalignedMemAccessFast() ||
1846 ((DstAlign == 0 || DstAlign >= 16) &&
1847 (SrcAlign == 0 || SrcAlign >= 16)))) {
1849 if (Subtarget->hasInt256())
1851 if (Subtarget->hasFp256())
1854 if (Subtarget->hasSSE2())
1856 if (Subtarget->hasSSE1())
1858 } else if (!MemcpyStrSrc && Size >= 8 &&
1859 !Subtarget->is64Bit() &&
1860 Subtarget->hasSSE2()) {
1861 // Do not use f64 to lower memcpy if the source is a string constant. It's
1862 // better to use i32 to avoid the loads.
1866 if (Subtarget->is64Bit() && Size >= 8)
1871 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1873 return X86ScalarSSEf32;
1874 else if (VT == MVT::f64)
1875 return X86ScalarSSEf64;
1880 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1885 *Fast = Subtarget->isUnalignedMemAccessFast();
1889 /// Return the entry encoding for a jump table in the
1890 /// current function. The returned value is a member of the
1891 /// MachineJumpTableInfo::JTEntryKind enum.
1892 unsigned X86TargetLowering::getJumpTableEncoding() const {
1893 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1894 // symbol.
1895 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1896 Subtarget->isPICStyleGOT())
1897 return MachineJumpTableInfo::EK_Custom32;
1899 // Otherwise, use the normal jump table encoding heuristics.
1900 return TargetLowering::getJumpTableEncoding();
1904 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1905 const MachineBasicBlock *MBB,
1906 unsigned uid,MCContext &Ctx) const{
1907 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1908 Subtarget->isPICStyleGOT());
1909 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1910 // entries.
1911 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1912 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1915 /// Returns relocation base for the given PIC jumptable.
1916 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1917 SelectionDAG &DAG) const {
1918 if (!Subtarget->is64Bit())
1919 // This doesn't have SDLoc associated with it, but is not really the
1920 // same as a Register.
1921 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1925 /// This returns the relocation base for the given PIC jumptable,
1926 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1927 const MCExpr *X86TargetLowering::
1928 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1929 MCContext &Ctx) const {
1930 // X86-64 uses RIP relative addressing based on the jump table label.
1931 if (Subtarget->isPICStyleRIPRel())
1932 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1934 // Otherwise, the reference is relative to the PIC base.
1935 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1938 // FIXME: Why is this routine here? Move to RegInfo!
1939 std::pair<const TargetRegisterClass*, uint8_t>
1940 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1941 const TargetRegisterClass *RRC = nullptr;
1943 switch (VT.SimpleTy) {
1945 return TargetLowering::findRepresentativeClass(VT);
1946 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1947 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1950 RRC = &X86::VR64RegClass;
1952 case MVT::f32: case MVT::f64:
1953 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1954 case MVT::v4f32: case MVT::v2f64:
1955 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1957 RRC = &X86::VR128RegClass;
1960 return std::make_pair(RRC, Cost);
1963 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1964 unsigned &Offset) const {
1965 if (!Subtarget->isTargetLinux())
1968 if (Subtarget->is64Bit()) {
1969 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1971 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1983 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1984 unsigned DestAS) const {
1985 assert(SrcAS != DestAS && "Expected different address spaces!");
1987 return SrcAS < 256 && DestAS < 256;
1990 //===----------------------------------------------------------------------===//
1991 // Return Value Calling Convention Implementation
1992 //===----------------------------------------------------------------------===//
1994 #include "X86GenCallingConv.inc"
1997 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
1998 MachineFunction &MF, bool isVarArg,
1999 const SmallVectorImpl<ISD::OutputArg> &Outs,
2000 LLVMContext &Context) const {
2001 SmallVector<CCValAssign, 16> RVLocs;
2002 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2003 return CCInfo.CheckReturn(Outs, RetCC_X86);
2006 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2007 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2012 X86TargetLowering::LowerReturn(SDValue Chain,
2013 CallingConv::ID CallConv, bool isVarArg,
2014 const SmallVectorImpl<ISD::OutputArg> &Outs,
2015 const SmallVectorImpl<SDValue> &OutVals,
2016 SDLoc dl, SelectionDAG &DAG) const {
2017 MachineFunction &MF = DAG.getMachineFunction();
2018 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2020 SmallVector<CCValAssign, 16> RVLocs;
2021 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2022 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2025 SmallVector<SDValue, 6> RetOps;
2026 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2027 // Operand #1 = Bytes To Pop
2028 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2031 // Copy the result values into the output registers.
2032 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2033 CCValAssign &VA = RVLocs[i];
2034 assert(VA.isRegLoc() && "Can only return in registers!");
2035 SDValue ValToCopy = OutVals[i];
2036 EVT ValVT = ValToCopy.getValueType();
2038 // Promote values to the appropriate types.
2039 if (VA.getLocInfo() == CCValAssign::SExt)
2040 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2041 else if (VA.getLocInfo() == CCValAssign::ZExt)
2042 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2043 else if (VA.getLocInfo() == CCValAssign::AExt)
2044 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2045 else if (VA.getLocInfo() == CCValAssign::BCvt)
2046 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2048 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2049 "Unexpected FP-extend for return value.");
2051 // If this is x86-64, and we disabled SSE, we can't return FP values,
2052 // or SSE or MMX vectors.
2053 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2054 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2055 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2056 report_fatal_error("SSE register return with SSE disabled");
2058 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2059 // llvm-gcc has never done it right and no one has noticed, so this
2060 // should be OK for now.
2061 if (ValVT == MVT::f64 &&
2062 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2063 report_fatal_error("SSE2 register return with SSE2 disabled");
2065 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2066 // the RET instruction and handled by the FP Stackifier.
2067 if (VA.getLocReg() == X86::FP0 ||
2068 VA.getLocReg() == X86::FP1) {
2069 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2070 // change the value to the FP stack register class.
2071 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2072 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2073 RetOps.push_back(ValToCopy);
2074 // Don't emit a copytoreg.
2078 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2079 // which is returned in RAX / RDX.
2080 if (Subtarget->is64Bit()) {
2081 if (ValVT == MVT::x86mmx) {
2082 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2083 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2084 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2086 // If we don't have SSE2 available, convert to v4f32 so the generated
2087 // register is legal.
2088 if (!Subtarget->hasSSE2())
2089 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2094 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2095 Flag = Chain.getValue(1);
2096 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2099 // The x86-64 ABIs require that for returning structs by value we copy
2100 // the sret argument into %rax/%eax (depending on ABI) for the return.
2101 // Win32 requires us to put the sret argument to %eax as well.
2102 // We saved the argument into a virtual register in the entry block,
2103 // so now we copy the value out and into %rax/%eax.
2105 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2106 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2107 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2108 // either case FuncInfo->setSRetReturnReg() will have been called.
2109 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2110 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2111 "No need for an sret register");
2112 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2115 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2116 X86::RAX : X86::EAX;
2117 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2118 Flag = Chain.getValue(1);
2120 // RAX/EAX now acts like a return value.
2121 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2124 RetOps[0] = Chain; // Update chain.
2126 // Add the flag if we have it.
2128 RetOps.push_back(Flag);
2130 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2133 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2134 if (N->getNumValues() != 1)
2136 if (!N->hasNUsesOfValue(1, 0))
2139 SDValue TCChain = Chain;
2140 SDNode *Copy = *N->use_begin();
2141 if (Copy->getOpcode() == ISD::CopyToReg) {
2142 // If the copy has a glue operand, we conservatively assume it isn't safe to
2143 // perform a tail call.
2144 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2146 TCChain = Copy->getOperand(0);
2147 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2150 bool HasRet = false;
2151 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2153 if (UI->getOpcode() != X86ISD::RET_FLAG)
2155 // If we are returning more than one value, we can definitely
2156 // not make a tail call; see PR19530.
2157 if (UI->getNumOperands() > 4)
2159 if (UI->getNumOperands() == 4 &&
2160 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2173 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2174 ISD::NodeType ExtendKind) const {
2176 // TODO: Is this also valid on 32-bit?
2177 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2178 ReturnMVT = MVT::i8;
2180 ReturnMVT = MVT::i32;
2182 EVT MinVT = getRegisterType(Context, ReturnMVT);
2183 return VT.bitsLT(MinVT) ? MinVT : VT;
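// For example, on x86-64 an i1 return value that is zero-extended only needs
// to be widened to i8, while other small integer returns are widened to i32.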
2186 /// Lower the result values of a call into the
2187 /// appropriate copies out of appropriate physical registers.
2190 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2191 CallingConv::ID CallConv, bool isVarArg,
2192 const SmallVectorImpl<ISD::InputArg> &Ins,
2193 SDLoc dl, SelectionDAG &DAG,
2194 SmallVectorImpl<SDValue> &InVals) const {
2196 // Assign locations to each value returned by this call.
2197 SmallVector<CCValAssign, 16> RVLocs;
2198 bool Is64Bit = Subtarget->is64Bit();
2199 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2201 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2203 // Copy all of the result registers out of their specified physreg.
2204 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2205 CCValAssign &VA = RVLocs[i];
2206 EVT CopyVT = VA.getValVT();
2208 // If this is x86-64, and we disabled SSE, we can't return FP values
2209 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2210 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2211 report_fatal_error("SSE register return with SSE disabled");
2214 // If we prefer to use the value in xmm registers, copy it out as f80 and
2215 // use a truncate to move it from fp stack reg to xmm reg.
2216 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2217 isScalarFPTypeInSSEReg(VA.getValVT()))
2220 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2221 CopyVT, InFlag).getValue(1);
2222 SDValue Val = Chain.getValue(0);
2224 if (CopyVT != VA.getValVT())
2225 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2226 // This truncation won't change the value.
2227 DAG.getIntPtrConstant(1));
2229 InFlag = Chain.getValue(2);
2230 InVals.push_back(Val);
2236 //===----------------------------------------------------------------------===//
2237 // C & StdCall & Fast Calling Convention implementation
2238 //===----------------------------------------------------------------------===//
2239 // The StdCall calling convention is the standard for many Windows API
2240 // routines. It differs from the C calling convention only slightly: the
2241 // callee cleans up the stack instead of the caller. Symbols are also
2242 // decorated in some fancy way :) It doesn't support any vector arguments.
2243 // For info on the fast calling convention see the Fast Calling Convention
2244 // (tail call) implementation, LowerX86_32FastCCCallTo.
2246 /// CallIsStructReturn - Determines whether a call uses struct return
2247 /// semantics.
2248 enum StructReturnType {
2253 static StructReturnType
2254 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2256 return NotStructReturn;
2258 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2259 if (!Flags.isSRet())
2260 return NotStructReturn;
2261 if (Flags.isInReg())
2262 return RegStructReturn;
2263 return StackStructReturn;
2266 /// Determines whether a function uses struct return semantics.
2267 static StructReturnType
2268 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2270 return NotStructReturn;
2272 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2273 if (!Flags.isSRet())
2274 return NotStructReturn;
2275 if (Flags.isInReg())
2276 return RegStructReturn;
2277 return StackStructReturn;
2280 /// Make a copy of an aggregate at address specified by "Src" to address
2281 /// "Dst" with size and alignment information specified by the specific
2282 /// parameter attribute. The copy will be passed as a byval function parameter.
2284 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2285 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2287 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2289 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2290 /*isVolatile*/false, /*AlwaysInline=*/true,
2291 MachinePointerInfo(), MachinePointerInfo());
2294 /// Return true if the calling convention is one that
2295 /// supports tail call optimization.
2296 static bool IsTailCallConvention(CallingConv::ID CC) {
2297 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2298 CC == CallingConv::HiPE);
2301 /// \brief Return true if the calling convention is a C calling convention.
2302 static bool IsCCallConvention(CallingConv::ID CC) {
2303 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2304 CC == CallingConv::X86_64_SysV);
2307 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2308 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2312 CallingConv::ID CalleeCC = CS.getCallingConv();
2313 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2319 /// Return true if the function is being made into
2320 /// a tailcall target by changing its ABI.
2321 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2322 bool GuaranteedTailCallOpt) {
2323 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2327 X86TargetLowering::LowerMemArgument(SDValue Chain,
2328 CallingConv::ID CallConv,
2329 const SmallVectorImpl<ISD::InputArg> &Ins,
2330 SDLoc dl, SelectionDAG &DAG,
2331 const CCValAssign &VA,
2332 MachineFrameInfo *MFI,
2334 // Create the nodes corresponding to a load from this parameter slot.
2335 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2336 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2337 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2338 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2341 // If the value is passed by pointer, we have the address passed instead
2342 // of the value itself.
2343 if (VA.getLocInfo() == CCValAssign::Indirect)
2344 ValVT = VA.getLocVT();
2346 ValVT = VA.getValVT();
2348 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2349 // changed with more analysis.
2350 // In case of tail call optimization mark all arguments mutable, since they
2351 // could be overwritten by the lowering of arguments in case of a tail call.
2352 if (Flags.isByVal()) {
2353 unsigned Bytes = Flags.getByValSize();
2354 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2355 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2356 return DAG.getFrameIndex(FI, getPointerTy());
2358 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2359 VA.getLocMemOffset(), isImmutable);
2360 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2361 return DAG.getLoad(ValVT, dl, Chain, FIN,
2362 MachinePointerInfo::getFixedStack(FI),
2363 false, false, false, 0);
2367 // FIXME: Get this from tablegen.
2368 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2369 const X86Subtarget *Subtarget) {
2370 assert(Subtarget->is64Bit());
2372 if (Subtarget->isCallingConvWin64(CallConv)) {
2373 static const MCPhysReg GPR64ArgRegsWin64[] = {
2374 X86::RCX, X86::RDX, X86::R8, X86::R9
2376 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2379 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2380 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2382 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2385 // FIXME: Get this from tablegen.
2386 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2387 CallingConv::ID CallConv,
2388 const X86Subtarget *Subtarget) {
2389 assert(Subtarget->is64Bit());
2390 if (Subtarget->isCallingConvWin64(CallConv)) {
2391 // The XMM registers which might contain var arg parameters are shadowed
2392 // in their paired GPR. So we only need to save the GPR to their home
2393 // slots.
2394 // TODO: __vectorcall will change this.
2398 const Function *Fn = MF.getFunction();
2399 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2400 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2401 "SSE register cannot be used when SSE is disabled!");
2402 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2403 !Subtarget->hasSSE1())
2404 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2405 // registers.
2408 static const MCPhysReg XMMArgRegs64Bit[] = {
2409 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2410 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2412 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2416 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2417 CallingConv::ID CallConv,
2419 const SmallVectorImpl<ISD::InputArg> &Ins,
2422 SmallVectorImpl<SDValue> &InVals)
2424 MachineFunction &MF = DAG.getMachineFunction();
2425 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2427 const Function* Fn = MF.getFunction();
2428 if (Fn->hasExternalLinkage() &&
2429 Subtarget->isTargetCygMing() &&
2430 Fn->getName() == "main")
2431 FuncInfo->setForceFramePointer(true);
2433 MachineFrameInfo *MFI = MF.getFrameInfo();
2434 bool Is64Bit = Subtarget->is64Bit();
2435 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2437 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2438 "Var args not supported with calling convention fastcc, ghc or hipe");
2440 // Assign locations to all of the incoming arguments.
2441 SmallVector<CCValAssign, 16> ArgLocs;
2442 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2444 // Allocate shadow area for Win64
2446 CCInfo.AllocateStack(32, 8);
2448 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2450 unsigned LastVal = ~0U;
2452 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2453 CCValAssign &VA = ArgLocs[i];
2454 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2455 // places.
2456 assert(VA.getValNo() != LastVal &&
2457 "Don't support value assigned to multiple locs yet");
2459 LastVal = VA.getValNo();
2461 if (VA.isRegLoc()) {
2462 EVT RegVT = VA.getLocVT();
2463 const TargetRegisterClass *RC;
2464 if (RegVT == MVT::i32)
2465 RC = &X86::GR32RegClass;
2466 else if (Is64Bit && RegVT == MVT::i64)
2467 RC = &X86::GR64RegClass;
2468 else if (RegVT == MVT::f32)
2469 RC = &X86::FR32RegClass;
2470 else if (RegVT == MVT::f64)
2471 RC = &X86::FR64RegClass;
2472 else if (RegVT.is512BitVector())
2473 RC = &X86::VR512RegClass;
2474 else if (RegVT.is256BitVector())
2475 RC = &X86::VR256RegClass;
2476 else if (RegVT.is128BitVector())
2477 RC = &X86::VR128RegClass;
2478 else if (RegVT == MVT::x86mmx)
2479 RC = &X86::VR64RegClass;
2480 else if (RegVT == MVT::i1)
2481 RC = &X86::VK1RegClass;
2482 else if (RegVT == MVT::v8i1)
2483 RC = &X86::VK8RegClass;
2484 else if (RegVT == MVT::v16i1)
2485 RC = &X86::VK16RegClass;
2486 else if (RegVT == MVT::v32i1)
2487 RC = &X86::VK32RegClass;
2488 else if (RegVT == MVT::v64i1)
2489 RC = &X86::VK64RegClass;
2491 llvm_unreachable("Unknown argument type!");
2493 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2494 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2496 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2497 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2498 // right size.
2499 if (VA.getLocInfo() == CCValAssign::SExt)
2500 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2501 DAG.getValueType(VA.getValVT()));
2502 else if (VA.getLocInfo() == CCValAssign::ZExt)
2503 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2504 DAG.getValueType(VA.getValVT()));
2505 else if (VA.getLocInfo() == CCValAssign::BCvt)
2506 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2508 if (VA.isExtInLoc()) {
2509 // Handle MMX values passed in XMM regs.
2510 if (RegVT.isVector())
2511 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2513 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2516 assert(VA.isMemLoc());
2517 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2520 // If value is passed via pointer - do a load.
2521 if (VA.getLocInfo() == CCValAssign::Indirect)
2522 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2523 MachinePointerInfo(), false, false, false, 0);
2525 InVals.push_back(ArgValue);
2528 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2529 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2530 // The x86-64 ABIs require that for returning structs by value we copy
2531 // the sret argument into %rax/%eax (depending on ABI) for the return.
2532 // Win32 requires us to put the sret argument to %eax as well.
2533 // Save the argument into a virtual register so that we can access it
2534 // from the return points.
2535 if (Ins[i].Flags.isSRet()) {
2536 unsigned Reg = FuncInfo->getSRetReturnReg();
2538 MVT PtrTy = getPointerTy();
2539 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2540 FuncInfo->setSRetReturnReg(Reg);
2542 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2543 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2549 unsigned StackSize = CCInfo.getNextStackOffset();
2550 // Align stack specially for tail calls.
2551 if (FuncIsMadeTailCallSafe(CallConv,
2552 MF.getTarget().Options.GuaranteedTailCallOpt))
2553 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2555 // If the function takes a variable number of arguments, make a frame index
2556 // for the start of the first vararg value... for expansion of llvm.va_start.
2557 // We can skip this if there are no va_start calls.
2558 if (MFI->hasVAStart() &&
2559 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2560 CallConv != CallingConv::X86_ThisCall))) {
2561 FuncInfo->setVarArgsFrameIndex(
2562 MFI->CreateFixedObject(1, StackSize, true));
2565 // Figure out if XMM registers are in use.
2566 assert(!(MF.getTarget().Options.UseSoftFloat &&
2567 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2568 "SSE register cannot be used when SSE is disabled!");
2570 // 64-bit calling conventions support varargs and register parameters, so we
2571 // have to do extra work to spill them in the prologue.
2572 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2573 // Find the first unallocated argument registers.
2574 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2575 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2576 unsigned NumIntRegs =
2577 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2578 unsigned NumXMMRegs =
2579 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2580 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2581 "SSE register cannot be used when SSE is disabled!");
2583 // Gather all the live in physical registers.
2584 SmallVector<SDValue, 6> LiveGPRs;
2585 SmallVector<SDValue, 8> LiveXMMRegs;
2587 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2588 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2590 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2592 if (!ArgXMMs.empty()) {
2593 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2594 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2595 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2596 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2597 LiveXMMRegs.push_back(
2598 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2603 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2604 // Get to the caller-allocated home save location. Add 8 to account
2605 // for the return address.
2606 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2607 FuncInfo->setRegSaveFrameIndex(
2608 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2609 // Fixup to set vararg frame on shadow area (4 x i64).
2611 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2613 // For X86-64, if there are vararg parameters that are passed via
2614 // registers, then we must store them to their spots on the stack so
2615 // they may be loaded by dereferencing the result of va_next.
2616 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2617 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2618 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2619 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
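// For the SysV x86-64 varargs convention this creates the standard register
// save area: 6 GPRs * 8 bytes plus 8 XMM registers * 16 bytes = 176 bytes,
// with the gp_offset/fp_offset values set just above indexing into it when
// va_arg walks the registers.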
2622 // Store the integer parameter registers.
2623 SmallVector<SDValue, 8> MemOps;
2624 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2626 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2627 for (SDValue Val : LiveGPRs) {
2628 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2629 DAG.getIntPtrConstant(Offset));
2631 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2632 MachinePointerInfo::getFixedStack(
2633 FuncInfo->getRegSaveFrameIndex(), Offset),
2635 MemOps.push_back(Store);
2639 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2640 // Now store the XMM (fp + vector) parameter registers.
2641 SmallVector<SDValue, 12> SaveXMMOps;
2642 SaveXMMOps.push_back(Chain);
2643 SaveXMMOps.push_back(ALVal);
2644 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2645 FuncInfo->getRegSaveFrameIndex()));
2646 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2647 FuncInfo->getVarArgsFPOffset()));
2648 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2650 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2651 MVT::Other, SaveXMMOps));
2654 if (!MemOps.empty())
2655 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2658 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2659 // Find the largest legal vector type.
2660 MVT VecVT = MVT::Other;
2661 // FIXME: Only some x86_32 calling conventions support AVX512.
2662 if (Subtarget->hasAVX512() &&
2663 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2664 CallConv == CallingConv::Intel_OCL_BI)))
2665 VecVT = MVT::v16f32;
2666 else if (Subtarget->hasAVX())
2668 else if (Subtarget->hasSSE2())
2671 // We forward some GPRs and some vector types.
2672 SmallVector<MVT, 2> RegParmTypes;
2673 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2674 RegParmTypes.push_back(IntVT);
2675 if (VecVT != MVT::Other)
2676 RegParmTypes.push_back(VecVT);
2678 // Compute the set of forwarded registers. The rest are scratch.
2679 SmallVectorImpl<ForwardedRegister> &Forwards =
2680 FuncInfo->getForwardedMustTailRegParms();
2681 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2683 // Conservatively forward AL on x86_64, since it might be used for varargs.
2684 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2685 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2686 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2689 // Copy all forwards from physical to virtual registers.
2690 for (ForwardedRegister &F : Forwards) {
2691 // FIXME: Can we use a less constrained schedule?
2692 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2693 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2694 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2698 // Some CCs need callee pop.
2699 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2700 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2701 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2703 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2704 // If this is an sret function, the return should pop the hidden pointer.
2705 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2706 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2707 argsAreStructReturn(Ins) == StackStructReturn)
2708 FuncInfo->setBytesToPopOnReturn(4);
2712 // RegSaveFrameIndex is X86-64 only.
2713 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2714 if (CallConv == CallingConv::X86_FastCall ||
2715 CallConv == CallingConv::X86_ThisCall)
2716 // fastcc functions can't have varargs.
2717 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2720 FuncInfo->setArgumentStackSize(StackSize);
2726 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2727 SDValue StackPtr, SDValue Arg,
2728 SDLoc dl, SelectionDAG &DAG,
2729 const CCValAssign &VA,
2730 ISD::ArgFlagsTy Flags) const {
2731 unsigned LocMemOffset = VA.getLocMemOffset();
2732 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2733 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2734 if (Flags.isByVal())
2735 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2737 return DAG.getStore(Chain, dl, Arg, PtrOff,
2738 MachinePointerInfo::getStack(LocMemOffset),
2742 /// Emit a load of the return address if tail call
2743 /// optimization is performed and it is required.
2745 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2746 SDValue &OutRetAddr, SDValue Chain,
2747 bool IsTailCall, bool Is64Bit,
2748 int FPDiff, SDLoc dl) const {
2749 // Adjust the Return address stack slot.
2750 EVT VT = getPointerTy();
2751 OutRetAddr = getReturnAddressFrameIndex(DAG);
2753 // Load the "old" Return address.
2754 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2755 false, false, false, 0);
2756 return SDValue(OutRetAddr.getNode(), 1);
2759 /// Emit a store of the return address if tail call
2760 /// optimization is performed and it is required (FPDiff!=0).
2761 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2762 SDValue Chain, SDValue RetAddrFrIdx,
2763 EVT PtrVT, unsigned SlotSize,
2764 int FPDiff, SDLoc dl) {
2765 // Store the return address to the appropriate stack slot.
2766 if (!FPDiff) return Chain;
2767 // Calculate the new stack slot for the return address.
2768 int NewReturnAddrFI =
2769 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2771 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2772 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2773 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2779 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2780 SmallVectorImpl<SDValue> &InVals) const {
2781 SelectionDAG &DAG = CLI.DAG;
2783 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2784 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2785 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2786 SDValue Chain = CLI.Chain;
2787 SDValue Callee = CLI.Callee;
2788 CallingConv::ID CallConv = CLI.CallConv;
2789 bool &isTailCall = CLI.IsTailCall;
2790 bool isVarArg = CLI.IsVarArg;
2792 MachineFunction &MF = DAG.getMachineFunction();
2793 bool Is64Bit = Subtarget->is64Bit();
2794 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2795 StructReturnType SR = callIsStructReturn(Outs);
2796 bool IsSibcall = false;
2797 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2799 if (MF.getTarget().Options.DisableTailCalls)
2802 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2804 // Force this to be a tail call. The verifier rules are enough to ensure
2805 // that we can lower this successfully without moving the return address
2806 // around.
2808 } else if (isTailCall) {
2809 // Check if it's really possible to do a tail call.
2810 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2811 isVarArg, SR != NotStructReturn,
2812 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2813 Outs, OutVals, Ins, DAG);
2815 // Sibcalls are automatically detected tailcalls which do not require
2816 // ABI changes.
2817 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2824 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2825 "Var args not supported with calling convention fastcc, ghc or hipe");
2827 // Analyze operands of the call, assigning locations to each operand.
2828 SmallVector<CCValAssign, 16> ArgLocs;
2829 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2831 // Allocate shadow area for Win64
2833 CCInfo.AllocateStack(32, 8);
2835 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2837 // Get a count of how many bytes are to be pushed on the stack.
2838 unsigned NumBytes = CCInfo.getNextStackOffset();
  if (IsSibcall)
2840     // This is a sibcall. The memory operands are already available in the
2841     // caller's incoming argument stack area.
    NumBytes = 0;
2843 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2844 IsTailCallConvention(CallConv))
2845 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
  int FPDiff = 0;
2848   if (isTailCall && !IsSibcall && !IsMustTail) {
2849 // Lower arguments at fp - stackoffset + fpdiff.
2850 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2852 FPDiff = NumBytesCallerPushed - NumBytes;
2854 // Set the delta of movement of the returnaddr stackslot.
2855 // But only set if delta is greater than previous delta.
2856 if (FPDiff < X86Info->getTCReturnAddrDelta())
2857 X86Info->setTCReturnAddrDelta(FPDiff);
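    // For example, if the current function was entered with 8 bytes of stack
    // arguments but the callee needs 24, FPDiff is -16 and the return address
    // slot has to move 16 bytes further down the stack before the tail jump.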
2860 unsigned NumBytesToPush = NumBytes;
2861 unsigned NumBytesToPop = NumBytes;
2863 // If we have an inalloca argument, all stack space has already been allocated
2864 // for us and be right at the top of the stack. We don't support multiple
2865 // arguments passed in memory when using inalloca.
2866 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2868 if (!ArgLocs.back().isMemLoc())
2869       report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
2871 if (ArgLocs.back().getLocMemOffset() != 0)
2872 report_fatal_error("any parameter with the inalloca attribute must be "
2873 "the only memory argument");
2877 Chain = DAG.getCALLSEQ_START(
2878 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2880 SDValue RetAddrFrIdx;
2881 // Load return address for tail calls.
2882 if (isTailCall && FPDiff)
2883 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2884 Is64Bit, FPDiff, dl);
2886 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2887 SmallVector<SDValue, 8> MemOpChains;
2890 // Walk the register/memloc assignments, inserting copies/loads. In the case
2891   // of tail call optimization, arguments are handled later.
2892 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2893 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2894 // Skip inalloca arguments, they have already been written.
2895 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2896     if (Flags.isInAlloca())
      continue;
2899 CCValAssign &VA = ArgLocs[i];
2900 EVT RegVT = VA.getLocVT();
2901 SDValue Arg = OutVals[i];
2902 bool isByVal = Flags.isByVal();
2904 // Promote the value if needed.
2905 switch (VA.getLocInfo()) {
2906 default: llvm_unreachable("Unknown loc info!");
2907 case CCValAssign::Full: break;
2908 case CCValAssign::SExt:
2909 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2911 case CCValAssign::ZExt:
2912 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2914 case CCValAssign::AExt:
2915 if (RegVT.is128BitVector()) {
2916 // Special case: passing MMX values in XMM registers.
2917 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2918 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2919 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
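        // The MOVL shuffle places the 64-bit MMX value in the low quadword of
        // the XMM register and leaves the upper quadword undefined.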
2921 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2923 case CCValAssign::BCvt:
2924 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2926 case CCValAssign::Indirect: {
2927 // Store the argument.
2928 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2929 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2930 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2931 MachinePointerInfo::getFixedStack(FI),
2938 if (VA.isRegLoc()) {
2939 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2940 if (isVarArg && IsWin64) {
2941 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2942 // shadow reg if callee is a varargs function.
2943 unsigned ShadowReg = 0;
2944 switch (VA.getLocReg()) {
2945 case X86::XMM0: ShadowReg = X86::RCX; break;
2946 case X86::XMM1: ShadowReg = X86::RDX; break;
2947 case X86::XMM2: ShadowReg = X86::R8; break;
2948 case X86::XMM3: ShadowReg = X86::R9; break;
2951 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2953 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2954 assert(VA.isMemLoc());
2955 if (!StackPtr.getNode())
2956 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2958 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2959 dl, DAG, VA, Flags));
2963 if (!MemOpChains.empty())
2964 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2966 if (Subtarget->isPICStyleGOT()) {
2967 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2970 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2971 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2973 // If we are tail calling and generating PIC/GOT style code load the
2974 // address of the callee into ECX. The value in ecx is used as target of
2975 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2976 // for tail calls on PIC/GOT architectures. Normally we would just put the
2977 // address of GOT into ebx and then call target@PLT. But for tail calls
2978 // ebx would be restored (since ebx is callee saved) before jumping to the
2981 // Note: The actual moving to ECX is done further down.
2982 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2983 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2984 !G->getGlobal()->hasProtectedVisibility())
2985 Callee = LowerGlobalAddress(Callee, DAG);
2986 else if (isa<ExternalSymbolSDNode>(Callee))
2987 Callee = LowerExternalSymbol(Callee, DAG);
2991 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2992 // From AMD64 ABI document:
2993 // For calls that may call functions that use varargs or stdargs
2994 // (prototype-less calls or calls to functions containing ellipsis (...) in
2995 // the declaration) %al is used as hidden argument to specify the number
2996 // of SSE registers used. The contents of %al do not need to match exactly
2997     // the number of registers, but must be an upper bound on the number of SSE
2998 // registers used and is in the range 0 - 8 inclusive.
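    // For example, a variadic call that passes two doubles in XMM0 and XMM1
    // can set AL to 2; any value from 2 to 8 would also be correct, just
    // wasteful, since it forces the callee to spill more XMM registers.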
3000 // Count the number of XMM registers allocated.
3001 static const MCPhysReg XMMArgRegs[] = {
3002 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3003 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3005 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3006 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3007 && "SSE registers cannot be used when SSE is disabled");
3009 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3010 DAG.getConstant(NumXMMRegs, MVT::i8)));
3013 if (isVarArg && IsMustTail) {
3014 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3015 for (const auto &F : Forwards) {
3016 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3017 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3021 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3022 // don't need this because the eligibility check rejects calls that require
3023 // shuffling arguments passed in memory.
3024 if (!IsSibcall && isTailCall) {
3025 // Force all the incoming stack arguments to be loaded from the stack
3026 // before any new outgoing arguments are stored to the stack, because the
3027 // outgoing stack slots may alias the incoming argument stack slots, and
3028 // the alias isn't otherwise explicit. This is slightly more conservative
3029 // than necessary, because it means that each store effectively depends
3030 // on every argument instead of just those arguments it would clobber.
3031 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3033 SmallVector<SDValue, 8> MemOpChains2;
3036 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3037 CCValAssign &VA = ArgLocs[i];
3040 assert(VA.isMemLoc());
3041 SDValue Arg = OutVals[i];
3042 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3043 // Skip inalloca arguments. They don't require any work.
3044 if (Flags.isInAlloca())
3046 // Create frame index.
3047 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3048 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3049 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3050 FIN = DAG.getFrameIndex(FI, getPointerTy());
3052 if (Flags.isByVal()) {
3053 // Copy relative to framepointer.
3054 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3055 if (!StackPtr.getNode())
3056 StackPtr = DAG.getCopyFromReg(Chain, dl,
3057 RegInfo->getStackRegister(),
3059 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3061 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3065 // Store relative to framepointer.
3066 MemOpChains2.push_back(
3067 DAG.getStore(ArgChain, dl, Arg, FIN,
3068 MachinePointerInfo::getFixedStack(FI),
3073 if (!MemOpChains2.empty())
3074 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3076 // Store the return address to the appropriate stack slot.
3077 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3078 getPointerTy(), RegInfo->getSlotSize(),
3082 // Build a sequence of copy-to-reg nodes chained together with token chain
3083 // and flag operands which copy the outgoing args into registers.
3085 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3086 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3087 RegsToPass[i].second, InFlag);
3088 InFlag = Chain.getValue(1);
3091 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3092 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3093 // In the 64-bit large code model, we have to make all calls
3094 // through a register, since the call instruction's 32-bit
3095 // pc-relative offset may not be large enough to hold the whole
3097 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3098 // If the callee is a GlobalAddress node (quite common, every direct call
3099     // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
    // it.
3101 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3103 // We should use extra load for direct calls to dllimported functions in
3105 const GlobalValue *GV = G->getGlobal();
3106 if (!GV->hasDLLImportStorageClass()) {
3107 unsigned char OpFlags = 0;
3108 bool ExtraLoad = false;
3109 unsigned WrapperKind = ISD::DELETED_NODE;
3111 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3112       // external symbols must go through the PLT in PIC mode. If the symbol
3113 // has hidden or protected visibility, or if it is static or local, then
3114 // we don't need to use the PLT - we can directly call it.
3115 if (Subtarget->isTargetELF() &&
3116 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3117 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3118 OpFlags = X86II::MO_PLT;
3119 } else if (Subtarget->isPICStyleStubAny() &&
3120 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3121 (!Subtarget->getTargetTriple().isMacOSX() ||
3122 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3123 // PC-relative references to external symbols should go through $stub,
3124 // unless we're building with the leopard linker or later, which
3125 // automatically synthesizes these stubs.
3126 OpFlags = X86II::MO_DARWIN_STUB;
3127 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3128 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3129 // If the function is marked as non-lazy, generate an indirect call
3130 // which loads from the GOT directly. This avoids runtime overhead
3131 // at the cost of eager binding (and one extra byte of encoding).
3132 OpFlags = X86II::MO_GOTPCREL;
3133         WrapperKind = X86ISD::WrapperRIP;
        ExtraLoad = true;
      }
3137 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3138 G->getOffset(), OpFlags);
3140 // Add a wrapper if needed.
3141 if (WrapperKind != ISD::DELETED_NODE)
3142 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3143       // Add extra indirection if needed.
      if (ExtraLoad)
3145 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3146 MachinePointerInfo::getGOT(),
3147 false, false, false, 0);
3149 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3150 unsigned char OpFlags = 0;
3152 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3153 // external symbols should go through the PLT.
3154 if (Subtarget->isTargetELF() &&
3155 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3156 OpFlags = X86II::MO_PLT;
3157 } else if (Subtarget->isPICStyleStubAny() &&
3158 (!Subtarget->getTargetTriple().isMacOSX() ||
3159 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3160 // PC-relative references to external symbols should go through $stub,
3161 // unless we're building with the leopard linker or later, which
3162 // automatically synthesizes these stubs.
3163 OpFlags = X86II::MO_DARWIN_STUB;
3166 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3168 } else if (Subtarget->isTarget64BitILP32() &&
3169 Callee->getValueType(0) == MVT::i32) {
3170     // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3171 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3174 // Returns a chain & a flag for retval copy to use.
3175 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3176 SmallVector<SDValue, 8> Ops;
3178 if (!IsSibcall && isTailCall) {
3179 Chain = DAG.getCALLSEQ_END(Chain,
3180 DAG.getIntPtrConstant(NumBytesToPop, true),
3181 DAG.getIntPtrConstant(0, true), InFlag, dl);
3182 InFlag = Chain.getValue(1);
3185 Ops.push_back(Chain);
3186 Ops.push_back(Callee);
  if (isTailCall)
3189     Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3191 // Add argument registers to the end of the list so that they are known live
3193 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3194 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3195 RegsToPass[i].second.getValueType()));
3197 // Add a register mask operand representing the call-preserved registers.
3198 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3199 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3200 assert(Mask && "Missing call preserved mask for calling convention");
3201 Ops.push_back(DAG.getRegisterMask(Mask));
3203 if (InFlag.getNode())
3204 Ops.push_back(InFlag);
  if (isTailCall) {
    // We used to do:
3208     //// If this is the first return lowered for this function, add the regs
3209 //// to the liveout set for the function.
3210 // This isn't right, although it's probably harmless on x86; liveouts
3211 // should be computed from returns not tail calls. Consider a void
3212 // function making a tail call to a function returning int.
3213     return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
  }
3216 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3217 InFlag = Chain.getValue(1);
3219 // Create the CALLSEQ_END node.
3220 unsigned NumBytesForCalleeToPop;
3221 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3222 DAG.getTarget().Options.GuaranteedTailCallOpt))
3223 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3224 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3225 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3226 SR == StackStructReturn)
3227 // If this is a call to a struct-return function, the callee
3228 // pops the hidden struct pointer, so we have to push it back.
3229 // This is common for Darwin/X86, Linux & Mingw32 targets.
3230 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3231 NumBytesForCalleeToPop = 4;
  else
3233     NumBytesForCalleeToPop = 0; // Callee pops nothing.
3235   // Returns a flag for retval copy to use.
  if (!IsSibcall) {
3237     Chain = DAG.getCALLSEQ_END(Chain,
3238                                DAG.getIntPtrConstant(NumBytesToPop, true),
3239                                DAG.getIntPtrConstant(NumBytesForCalleeToPop,
                                                     true),
                               InFlag, dl);
3242     InFlag = Chain.getValue(1);
  }
3245   // Handle result values, copying them out of physregs into vregs that we
  // return.
3247 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3248 Ins, dl, DAG, InVals);
3251 //===----------------------------------------------------------------------===//
3252 // Fast Calling Convention (tail call) implementation
3253 //===----------------------------------------------------------------------===//
3255 //  Like stdcall, the callee cleans up its arguments, except that ECX is
3256 //  reserved for storing the address of the tail-called function. Only two
3257 //  registers are free for argument passing (inreg). Tail call optimization is
//  performed provided:
3259 // * tailcallopt is enabled
3260 // * caller/callee are fastcc
3261 // On X86_64 architecture with GOT-style position independent code only local
3262 // (within module) calls are supported at the moment.
3263 //  To keep the stack aligned according to the platform ABI, the function
3264 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
3265 //  multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
//  for example.)
3266 //  If the tail-called callee has more arguments than the caller, the caller
3267 //  needs to make sure that there is room to move the RETADDR to. This is
3268 //  achieved by reserving an area the size of the argument delta right after the
3269 //  original RETADDR, but before the saved frame pointer or the spilled registers,
3270 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
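//  Roughly, the resulting stack then looks like:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..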
3282 /// GetAlignedArgumentStackSize - Round the stack argument size up so that,
3283 /// together with the return-address slot, the stack stays aligned, e.g. to
/// 16n + 12 bytes for a 16-byte alignment requirement and a 4-byte slot.
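/// For example, with a 16-byte stack alignment and 8-byte slots (64-bit), a
/// StackSize of 20 is rounded up to 24 (16n + 8); with 4-byte slots (32-bit)
/// it is rounded up to 28 (16n + 12).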
3285 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3286 SelectionDAG& DAG) const {
3287 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3288 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3289 unsigned StackAlignment = TFI.getStackAlignment();
3290 uint64_t AlignMask = StackAlignment - 1;
3291 int64_t Offset = StackSize;
3292 unsigned SlotSize = RegInfo->getSlotSize();
3293 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3294     // The misaligned part is no larger than (alignment - slot size), so just
    // pad up to that boundary.
3295 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out the low bits, then add one full stack alignment plus
    // (alignment - slot size) bytes.
3298 Offset = ((~AlignMask) & Offset) + StackAlignment +
3299 (StackAlignment-SlotSize);
3304 /// MatchingStackOffset - Return true if the given stack call argument is
3305 /// already available in the same position (relatively) of the caller's
3306 /// incoming argument stack.
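/// For example, when f(int a, int b) simply tail-calls g(a, b), each outgoing
/// argument is already in its matching incoming stack slot, so no stores are
/// needed.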
3308 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3309 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3310 const X86InstrInfo *TII) {
3311   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
3313 if (Arg.getOpcode() == ISD::CopyFromReg) {
3314 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3315 if (!TargetRegisterInfo::isVirtualRegister(VR))
3317 MachineInstr *Def = MRI->getVRegDef(VR);
3320 if (!Flags.isByVal()) {
3321 if (!TII->isLoadFromStackSlot(Def, FI))
3324 unsigned Opcode = Def->getOpcode();
3325 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3326 Opcode == X86::LEA64_32r) &&
3327 Def->getOperand(1).isFI()) {
3328 FI = Def->getOperand(1).getIndex();
3329 Bytes = Flags.getByValSize();
3333 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3334 if (Flags.isByVal())
3335 // ByVal argument is passed in as a pointer but it's now being
3336 // dereferenced. e.g.
3337 // define @foo(%struct.X* %A) {
3338 // tail call @bar(%struct.X* byval %A)
3341 SDValue Ptr = Ld->getBasePtr();
3342 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3345 FI = FINode->getIndex();
3346 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3347 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3348 FI = FINode->getIndex();
3349 Bytes = Flags.getByValSize();
3353 assert(FI != INT_MAX);
3354 if (!MFI->isFixedObjectIndex(FI))
3356 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3359 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3360 /// for tail call optimization. Targets which want to do tail call
3361 /// optimization should implement this function.
3363 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3364 CallingConv::ID CalleeCC,
3366 bool isCalleeStructRet,
3367 bool isCallerStructRet,
3369 const SmallVectorImpl<ISD::OutputArg> &Outs,
3370 const SmallVectorImpl<SDValue> &OutVals,
3371 const SmallVectorImpl<ISD::InputArg> &Ins,
3372 SelectionDAG &DAG) const {
3373 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3376 // If -tailcallopt is specified, make fastcc functions tail-callable.
3377 const MachineFunction &MF = DAG.getMachineFunction();
3378 const Function *CallerF = MF.getFunction();
3380 // If the function return type is x86_fp80 and the callee return type is not,
3381 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3382 // perform a tailcall optimization here.
3383 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3386 CallingConv::ID CallerCC = CallerF->getCallingConv();
3387 bool CCMatch = CallerCC == CalleeCC;
3388 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3389 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3391 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3392 if (IsTailCallConvention(CalleeCC) && CCMatch)
3397 // Look for obvious safe cases to perform tail call optimization that do not
3398 // require ABI changes. This is what gcc calls sibcall.
3400 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3401 // emit a special epilogue.
3402 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3403 if (RegInfo->needsStackRealignment(MF))
3406 // Also avoid sibcall optimization if either caller or callee uses struct
3407 // return semantics.
3408 if (isCalleeStructRet || isCallerStructRet)
3411 // An stdcall/thiscall caller is expected to clean up its arguments; the
3412 // callee isn't going to do that.
3413 // FIXME: this is more restrictive than needed. We could produce a tailcall
3414 // when the stack adjustment matches. For example, with a thiscall that takes
3415 // only one argument.
3416 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3417 CallerCC == CallingConv::X86_ThisCall))
3420   // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
3422 if (isVarArg && !Outs.empty()) {
3424 // Optimizing for varargs on Win64 is unlikely to be safe without
3425 // additional testing.
3426 if (IsCalleeWin64 || IsCallerWin64)
3429 SmallVector<CCValAssign, 16> ArgLocs;
3430 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3433 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3434 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3435 if (!ArgLocs[i].isRegLoc())
3439 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3440 // stack. Therefore, if it's not used by the call it is not safe to optimize
3441 // this into a sibcall.
3442 bool Unused = false;
3443 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3450 SmallVector<CCValAssign, 16> RVLocs;
3451 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3453 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3454 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3455 CCValAssign &VA = RVLocs[i];
3456 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3461 // If the calling conventions do not match, then we'd better make sure the
3462 // results are returned in the same way as what the caller expects.
3464 SmallVector<CCValAssign, 16> RVLocs1;
3465 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3467 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3469 SmallVector<CCValAssign, 16> RVLocs2;
3470 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3472 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3474 if (RVLocs1.size() != RVLocs2.size())
3476 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3477 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3479 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3481 if (RVLocs1[i].isRegLoc()) {
3482 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3485 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3491   // If the callee takes no arguments then go on to check the results of the
  // call.
3493 if (!Outs.empty()) {
3494 // Check if stack adjustment is needed. For now, do not do this if any
3495 // argument is passed on the stack.
3496 SmallVector<CCValAssign, 16> ArgLocs;
3497 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3500 // Allocate shadow area for Win64
3502 CCInfo.AllocateStack(32, 8);
3504 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3505 if (CCInfo.getNextStackOffset()) {
3506 MachineFunction &MF = DAG.getMachineFunction();
3507 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3510 // Check if the arguments are already laid out in the right way as
3511 // the caller's fixed stack objects.
3512 MachineFrameInfo *MFI = MF.getFrameInfo();
3513 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3514 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3515 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3516 CCValAssign &VA = ArgLocs[i];
3517 SDValue Arg = OutVals[i];
3518 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3519 if (VA.getLocInfo() == CCValAssign::Indirect)
3521 if (!VA.isRegLoc()) {
3522 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3529 // If the tailcall address may be in a register, then make sure it's
3530 // possible to register allocate for it. In 32-bit, the call address can
3531 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3532 // callee-saved registers are restored. These happen to be the same
3533 // registers used to pass 'inreg' arguments so watch out for those.
3534 if (!Subtarget->is64Bit() &&
3535 ((!isa<GlobalAddressSDNode>(Callee) &&
3536 !isa<ExternalSymbolSDNode>(Callee)) ||
3537 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3538 unsigned NumInRegs = 0;
3539 // In PIC we need an extra register to formulate the address computation
3541 unsigned MaxInRegs =
3542 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3544 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3545 CCValAssign &VA = ArgLocs[i];
3548 unsigned Reg = VA.getLocReg();
3551 case X86::EAX: case X86::EDX: case X86::ECX:
3552 if (++NumInRegs == MaxInRegs)
3564 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3565 const TargetLibraryInfo *libInfo) const {
3566 return X86::createFastISel(funcInfo, libInfo);
3569 //===----------------------------------------------------------------------===//
3570 // Other Lowering Hooks
3571 //===----------------------------------------------------------------------===//
3573 static bool MayFoldLoad(SDValue Op) {
3574 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3577 static bool MayFoldIntoStore(SDValue Op) {
3578 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3581 static bool isTargetShuffle(unsigned Opcode) {
3583 default: return false;
3584 case X86ISD::BLENDI:
3585 case X86ISD::PSHUFB:
3586 case X86ISD::PSHUFD:
3587 case X86ISD::PSHUFHW:
3588 case X86ISD::PSHUFLW:
3590 case X86ISD::PALIGNR:
3591 case X86ISD::MOVLHPS:
3592 case X86ISD::MOVLHPD:
3593 case X86ISD::MOVHLPS:
3594 case X86ISD::MOVLPS:
3595 case X86ISD::MOVLPD:
3596 case X86ISD::MOVSHDUP:
3597 case X86ISD::MOVSLDUP:
3598 case X86ISD::MOVDDUP:
3601 case X86ISD::UNPCKL:
3602 case X86ISD::UNPCKH:
3603 case X86ISD::VPERMILPI:
3604 case X86ISD::VPERM2X128:
3605 case X86ISD::VPERMI:
3610 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3611 SDValue V1, unsigned TargetMask,
3612 SelectionDAG &DAG) {
3614 default: llvm_unreachable("Unknown x86 shuffle node");
3615 case X86ISD::PSHUFD:
3616 case X86ISD::PSHUFHW:
3617 case X86ISD::PSHUFLW:
3618 case X86ISD::VPERMILPI:
3619 case X86ISD::VPERMI:
3620 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3624 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3625 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3627 default: llvm_unreachable("Unknown x86 shuffle node");
3628 case X86ISD::MOVLHPS:
3629 case X86ISD::MOVLHPD:
3630 case X86ISD::MOVHLPS:
3631 case X86ISD::MOVLPS:
3632 case X86ISD::MOVLPD:
3635 case X86ISD::UNPCKL:
3636 case X86ISD::UNPCKH:
3637 return DAG.getNode(Opc, dl, VT, V1, V2);
3641 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3642 MachineFunction &MF = DAG.getMachineFunction();
3643 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3644 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3645 int ReturnAddrIndex = FuncInfo->getRAIndex();
3647 if (ReturnAddrIndex == 0) {
3648 // Set up a frame object for the return address.
3649 unsigned SlotSize = RegInfo->getSlotSize();
3650 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3653 FuncInfo->setRAIndex(ReturnAddrIndex);
3656 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3659 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3660 bool hasSymbolicDisplacement) {
3661 // Offset should fit into 32 bit immediate field.
3662 if (!isInt<32>(Offset))
3665 // If we don't have a symbolic displacement - we don't have any extra
3667 if (!hasSymbolicDisplacement)
3670 // FIXME: Some tweaks might be needed for medium code model.
3671 if (M != CodeModel::Small && M != CodeModel::Kernel)
3674   // For the small code model, we assume that the latest object is 16MB below
3675   // the end of the 31-bit address boundary. We may also accept pretty large
3676   // negative constants, knowing that all objects are in the positive half of
  // the address space.
3677 if (M == CodeModel::Small && Offset < 16*1024*1024)
3680   // For the kernel code model we know that all objects reside in the negative
3681   // half of the 32-bit address space. We must not accept negative offsets,
3682   // since they may push the address out of range, but we may accept pretty
  // large positive ones.
3683 if (M == CodeModel::Kernel && Offset >= 0)
3689 /// isCalleePop - Determines whether the callee is required to pop its
3690 /// own arguments. Callee pop is necessary to support tail calls.
3691 bool X86::isCalleePop(CallingConv::ID CallingConv,
3692 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3693 switch (CallingConv) {
3696 case CallingConv::X86_StdCall:
3697 case CallingConv::X86_FastCall:
3698 case CallingConv::X86_ThisCall:
3700 case CallingConv::Fast:
3701 case CallingConv::GHC:
3702 case CallingConv::HiPE:
3709 /// \brief Return true if the condition is an unsigned comparison operation.
3710 static bool isX86CCUnsigned(unsigned X86CC) {
3712 default: llvm_unreachable("Invalid integer condition!");
3713 case X86::COND_E: return true;
3714 case X86::COND_G: return false;
3715 case X86::COND_GE: return false;
3716 case X86::COND_L: return false;
3717 case X86::COND_LE: return false;
3718 case X86::COND_NE: return true;
3719 case X86::COND_B: return true;
3720 case X86::COND_A: return true;
3721 case X86::COND_BE: return true;
3722 case X86::COND_AE: return true;
3724 llvm_unreachable("covered switch fell through?!");
3727 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3728 /// specific condition code, returning the condition code and the LHS/RHS of the
3729 /// comparison to make.
3730 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3731 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3733 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3734 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3735 // X > -1 -> X == 0, jump !sign.
3736 RHS = DAG.getConstant(0, RHS.getValueType());
3737       return X86::COND_NS;
    }
3739     if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3740       // X < 0 -> X == 0, jump on sign.
      return X86::COND_S;
    }
3743     if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
      // X < 1 -> X <= 0
3745       RHS = DAG.getConstant(0, RHS.getValueType());
3746       return X86::COND_LE;
    }
  }
3750 switch (SetCCOpcode) {
3751 default: llvm_unreachable("Invalid integer condition!");
3752 case ISD::SETEQ: return X86::COND_E;
3753 case ISD::SETGT: return X86::COND_G;
3754 case ISD::SETGE: return X86::COND_GE;
3755 case ISD::SETLT: return X86::COND_L;
3756 case ISD::SETLE: return X86::COND_LE;
3757 case ISD::SETNE: return X86::COND_NE;
3758 case ISD::SETULT: return X86::COND_B;
3759 case ISD::SETUGT: return X86::COND_A;
3760 case ISD::SETULE: return X86::COND_BE;
3761 case ISD::SETUGE: return X86::COND_AE;
3765 // First determine if it is required or is profitable to flip the operands.
3767 // If LHS is a foldable load, but RHS is not, flip the condition.
3768 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3769 !ISD::isNON_EXTLoad(RHS.getNode())) {
3770 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3771 std::swap(LHS, RHS);
  }
3774   switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
3780     std::swap(LHS, RHS);
    break;
  }
3784   // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
3786   //   0 | 0 | 0 | X > Y
3787   //   0 | 0 | 1 | X < Y
3788   //   1 | 0 | 0 | X == Y
3789   //   1 | 1 | 1 | unordered
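  // For example, the ordered SETGT maps to COND_A (CF==0 and ZF==0), which
  // holds exactly for the X > Y row above.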
3790 switch (SetCCOpcode) {
3791 default: llvm_unreachable("Condcode should be pre-legalized away");
3793 case ISD::SETEQ: return X86::COND_E;
3794 case ISD::SETOLT: // flipped
3796 case ISD::SETGT: return X86::COND_A;
3797 case ISD::SETOLE: // flipped
3799 case ISD::SETGE: return X86::COND_AE;
3800 case ISD::SETUGT: // flipped
3802 case ISD::SETLT: return X86::COND_B;
3803 case ISD::SETUGE: // flipped
3805 case ISD::SETLE: return X86::COND_BE;
3807 case ISD::SETNE: return X86::COND_NE;
3808 case ISD::SETUO: return X86::COND_P;
3809 case ISD::SETO: return X86::COND_NP;
3811 case ISD::SETUNE: return X86::COND_INVALID;
3815 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3816 /// code. Current x86 isa includes the following FP cmov instructions:
3817 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3818 static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B: case X86::COND_BE: case X86::COND_E: case X86::COND_P:
  case X86::COND_A: case X86::COND_AE: case X86::COND_NE: case X86::COND_NP:
    return true;
  }
}
3834 /// isFPImmLegal - Returns true if the target can instruction select the
3835 /// specified FP immediate natively. If false, the legalizer will
3836 /// materialize the FP immediate as a load from a constant pool.
3837 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3838 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3839 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3845 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3846 ISD::LoadExtType ExtTy,
3848 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3849   // relocations target a movq or addq instruction: don't let the load shrink.
3850 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3851 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3852 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3853 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3857 /// \brief Returns true if it is beneficial to convert a load of a constant
3858 /// to just the constant itself.
3859 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3861 assert(Ty->isIntegerTy());
3863 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3864 if (BitSize == 0 || BitSize > 64)
3869 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3870 unsigned Index) const {
3871 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3874 return (Index == 0 || Index == ResVT.getVectorNumElements());
3877 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3878 // Speculate cttz only if we can directly use TZCNT.
3879 return Subtarget->hasBMI();
3882 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3883 // Speculate ctlz only if we can directly use LZCNT.
3884 return Subtarget->hasLZCNT();
3887 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3888 /// the specified range [Low, Hi).
3889 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3890 return (Val < 0) || (Val >= Low && Val < Hi);
3893 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3894 /// specified value.
3895 static bool isUndefOrEqual(int Val, int CmpVal) {
3896 return (Val < 0 || Val == CmpVal);
3899 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3900 /// from position Pos and ending in Pos+Size, falls within the specified
3901 /// sequential range [Low, Low+Size), or is undef.
3902 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3903 unsigned Pos, unsigned Size, int Low) {
3904 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3905 if (!isUndefOrEqual(Mask[i], Low))
3910 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
3911 /// the two vector operands have swapped position.
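/// For example, with 4 elements the mask <0, 5, 2, 7> becomes <4, 1, 6, 3>.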
3912 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
3913 unsigned NumElems) {
3914   for (unsigned i = 0; i != NumElems; ++i) {
    int idx = Mask[i];
    if (idx < 0)
      continue;
3918     else if (idx < (int)NumElems)
3919       Mask[i] = idx + NumElems;
    else
3921       Mask[i] = idx - NumElems;
  }
}
3925 /// isVEXTRACTIndex - Return true if the specified
3926 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
3927 /// suitable for instructions that extract 128- or 256-bit vectors.
3928 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
3929 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
3930 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
3933 // The index should be aligned on a vecWidth-bit boundary.
3935 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
3937 MVT VT = N->getSimpleValueType(0);
3938 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
3939 bool Result = (Index * ElSize) % vecWidth == 0;
3944 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
3945 /// operand specifies a subvector insert that is suitable for input to
3946 /// insertion of 128 or 256-bit subvectors
3947 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
3948 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
3949 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
3951 // The index should be aligned on a vecWidth-bit boundary.
3953 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
3955 MVT VT = N->getSimpleValueType(0);
3956 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
3957 bool Result = (Index * ElSize) % vecWidth == 0;
3962 bool X86::isVINSERT128Index(SDNode *N) {
3963 return isVINSERTIndex(N, 128);
3966 bool X86::isVINSERT256Index(SDNode *N) {
3967 return isVINSERTIndex(N, 256);
3970 bool X86::isVEXTRACT128Index(SDNode *N) {
3971 return isVEXTRACTIndex(N, 128);
3974 bool X86::isVEXTRACT256Index(SDNode *N) {
3975 return isVEXTRACTIndex(N, 256);
3978 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
3979 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
3980 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
3981 llvm_unreachable("Illegal extract subvector for VEXTRACT");
3984 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
3986 MVT VecVT = N->getOperand(0).getSimpleValueType();
3987 MVT ElVT = VecVT.getVectorElementType();
3989 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
3990 return Index / NumElemsPerChunk;
3993 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
3994 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
3995 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
3996 llvm_unreachable("Illegal insert subvector for VINSERT");
3999 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4001 MVT VecVT = N->getSimpleValueType(0);
4002 MVT ElVT = VecVT.getVectorElementType();
4004 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4005 return Index / NumElemsPerChunk;
4008 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4009 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4010 /// and VEXTRACTI128 instructions.
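/// For example, extracting the upper <4 x i32> half of a <8 x i32> vector
/// (element index 4) yields the immediate 1.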
4011 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4012 return getExtractVEXTRACTImmediate(N, 128);
4015 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4016 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4017 /// and VEXTRACTI64x4 instructions.
4018 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4019 return getExtractVEXTRACTImmediate(N, 256);
4022 /// getInsertVINSERT128Immediate - Return the appropriate immediate
4023 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
4024 /// and VINSERTI128 instructions.
4025 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
4026 return getInsertVINSERTImmediate(N, 128);
4029 /// getInsertVINSERT256Immediate - Return the appropriate immediate
4030 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
4031 /// and VINSERTI64x4 instructions.
4032 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
4033 return getInsertVINSERTImmediate(N, 256);
4036 /// isZero - Returns true if Elt is a constant integer zero
4037 static bool isZero(SDValue V) {
4038 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
4039 return C && C->isNullValue();
4042 /// isZeroNode - Returns true if Elt is a constant integer zero or a floating
/// point constant +0.0.
4044 bool X86::isZeroNode(SDValue Elt) {
  if (isZero(Elt))
    return true;
4047   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
4048 return CFP->getValueAPF().isPosZero();
4052 /// getZeroVector - Returns a vector of specified type with all zero elements.
4054 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
4055 SelectionDAG &DAG, SDLoc dl) {
4056 assert(VT.isVector() && "Expected a vector type");
4058 // Always build SSE zero vectors as <4 x i32> bitcasted
4059 // to their dest type. This ensures they get CSE'd.
4061 if (VT.is128BitVector()) { // SSE
4062 if (Subtarget->hasSSE2()) { // SSE2
4063 SDValue Cst = DAG.getConstant(0, MVT::i32);
4064 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4066 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
4067 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
4069 } else if (VT.is256BitVector()) { // AVX
4070 if (Subtarget->hasInt256()) { // AVX2
4071 SDValue Cst = DAG.getConstant(0, MVT::i32);
4072 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4073 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
4075 // 256-bit logic and arithmetic instructions in AVX are all
4076 // floating-point, no support for integer ops. Emit fp zeroed vectors.
4077 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
4078 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4079 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
4081 } else if (VT.is512BitVector()) { // AVX-512
4082 SDValue Cst = DAG.getConstant(0, MVT::i32);
4083 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
4084 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4085 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
4086 } else if (VT.getScalarType() == MVT::i1) {
4087 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
4088 SDValue Cst = DAG.getConstant(0, MVT::i1);
4089 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
4090 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
4092 llvm_unreachable("Unexpected vector type");
4094 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4097 /// getOnesVector - Returns a vector of specified type with all bits set.
4098 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
4099 /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
4100 /// Then bitcast to their original type, ensuring they get CSE'd.
4101 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
4103 assert(VT.isVector() && "Expected a vector type");
4105 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
4107 if (VT.is256BitVector()) {
4108 if (HasInt256) { // AVX2
4109 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4110 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
4112 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4113 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
4115 } else if (VT.is128BitVector()) {
4116 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4118 llvm_unreachable("Unexpected vector type");
4120 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4123 /// getMOVL - Returns a vector_shuffle node for a movs{s|d} or movd
4124 /// operation of the specified width.
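/// For a 4-element vector this yields the mask <4, 1, 2, 3>: element 0 is
/// taken from V2 and the remaining elements from V1.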
4125 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
4127 unsigned NumElems = VT.getVectorNumElements();
4128 SmallVector<int, 8> Mask;
4129 Mask.push_back(NumElems);
4130 for (unsigned i = 1; i != NumElems; ++i)
4132 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4135 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
4136 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
4138 unsigned NumElems = VT.getVectorNumElements();
4139 SmallVector<int, 8> Mask;
4140 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
4142 Mask.push_back(i + NumElems);
4144 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4147 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
4148 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
4150 unsigned NumElems = VT.getVectorNumElements();
4151 SmallVector<int, 8> Mask;
4152 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
4153 Mask.push_back(i + Half);
4154 Mask.push_back(i + NumElems + Half);
4156 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4159 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
4160 /// vector and a zero or undef vector. This produces a shuffle where the low
4161 /// element of V2 is swizzled into the zero/undef vector, landing at element
4162 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4163 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                            bool IsZero,
4165                                            const X86Subtarget *Subtarget,
4166 SelectionDAG &DAG) {
4167 MVT VT = V2.getSimpleValueType();
  SDValue V1 = IsZero
4169     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4170 unsigned NumElems = VT.getVectorNumElements();
4171 SmallVector<int, 16> MaskVec;
4172 for (unsigned i = 0; i != NumElems; ++i)
4173 // If this is the insertion idx, put the low elt of V2 here.
4174 MaskVec.push_back(i == Idx ? NumElems : i);
4175 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
4178 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
4179 /// target specific opcode. Returns true if the Mask could be calculated. Sets
4180 /// IsUnary to true if only uses one source. Note that this will set IsUnary for
4181 /// shuffles which use a single input multiple times, and in those cases it will
4182 /// adjust the mask to only have indices within that single input.
4183 static bool getTargetShuffleMask(SDNode *N, MVT VT,
4184 SmallVectorImpl<int> &Mask, bool &IsUnary) {
4185 unsigned NumElems = VT.getVectorNumElements();
4189 bool IsFakeUnary = false;
4190 switch(N->getOpcode()) {
4191 case X86ISD::BLENDI:
4192 ImmN = N->getOperand(N->getNumOperands()-1);
4193 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4196 ImmN = N->getOperand(N->getNumOperands()-1);
4197 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4198 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4200 case X86ISD::UNPCKH:
4201 DecodeUNPCKHMask(VT, Mask);
4202 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4204 case X86ISD::UNPCKL:
4205 DecodeUNPCKLMask(VT, Mask);
4206 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4208 case X86ISD::MOVHLPS:
4209 DecodeMOVHLPSMask(NumElems, Mask);
4210 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4212 case X86ISD::MOVLHPS:
4213 DecodeMOVLHPSMask(NumElems, Mask);
4214 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4216 case X86ISD::PALIGNR:
4217 ImmN = N->getOperand(N->getNumOperands()-1);
4218 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4220 case X86ISD::PSHUFD:
4221 case X86ISD::VPERMILPI:
4222 ImmN = N->getOperand(N->getNumOperands()-1);
4223 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4226 case X86ISD::PSHUFHW:
4227 ImmN = N->getOperand(N->getNumOperands()-1);
4228 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4231 case X86ISD::PSHUFLW:
4232 ImmN = N->getOperand(N->getNumOperands()-1);
4233 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4236 case X86ISD::PSHUFB: {
4238 SDValue MaskNode = N->getOperand(1);
4239 while (MaskNode->getOpcode() == ISD::BITCAST)
4240 MaskNode = MaskNode->getOperand(0);
4242 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
4243 // If we have a build-vector, then things are easy.
4244 EVT VT = MaskNode.getValueType();
4245 assert(VT.isVector() &&
4246 "Can't produce a non-vector with a build_vector!");
4247 if (!VT.isInteger())
4250 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
4252 SmallVector<uint64_t, 32> RawMask;
4253 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
4254 SDValue Op = MaskNode->getOperand(i);
4255 if (Op->getOpcode() == ISD::UNDEF) {
4256 RawMask.push_back((uint64_t)SM_SentinelUndef);
4259 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
4262 APInt MaskElement = CN->getAPIntValue();
4264 // We now have to decode the element which could be any integer size and
4265 // extract each byte of it.
4266 for (int j = 0; j < NumBytesPerElement; ++j) {
4267 // Note that this is x86 and so always little endian: the low byte is
4268 // the first byte of the mask.
4269 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
4270 MaskElement = MaskElement.lshr(8);
4273 DecodePSHUFBMask(RawMask, Mask);
4277 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
4281 SDValue Ptr = MaskLoad->getBasePtr();
4282 if (Ptr->getOpcode() == X86ISD::Wrapper)
4283 Ptr = Ptr->getOperand(0);
4285 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
4286 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
4289 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
4290 DecodePSHUFBMask(C, Mask);
4298 case X86ISD::VPERMI:
4299 ImmN = N->getOperand(N->getNumOperands()-1);
4300 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4305 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
4307 case X86ISD::VPERM2X128:
4308 ImmN = N->getOperand(N->getNumOperands()-1);
4309 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4310 if (Mask.empty()) return false;
4312 case X86ISD::MOVSLDUP:
4313 DecodeMOVSLDUPMask(VT, Mask);
4316 case X86ISD::MOVSHDUP:
4317 DecodeMOVSHDUPMask(VT, Mask);
4320 case X86ISD::MOVDDUP:
4321 DecodeMOVDDUPMask(VT, Mask);
4324 case X86ISD::MOVLHPD:
4325 case X86ISD::MOVLPD:
4326 case X86ISD::MOVLPS:
4327 // Not yet implemented
4329 default: llvm_unreachable("unknown target shuffle node");
4332 // If we have a fake unary shuffle, the shuffle mask is spread across two
4333 // inputs that are actually the same node. Re-map the mask to always point
4334 // into the first input.
4337 if (M >= (int)Mask.size())
4343 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
4344 /// element of the result of the vector shuffle.
4345 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
4348 return SDValue(); // Limit search depth.
4350 SDValue V = SDValue(N, 0);
4351 EVT VT = V.getValueType();
4352 unsigned Opcode = V.getOpcode();
4354 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
4355 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
4356 int Elt = SV->getMaskElt(Index);
4359 return DAG.getUNDEF(VT.getVectorElementType());
4361 unsigned NumElems = VT.getVectorNumElements();
4362 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
4363 : SV->getOperand(1);
4364 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
4367 // Recurse into target specific vector shuffles to find scalars.
4368 if (isTargetShuffle(Opcode)) {
4369 MVT ShufVT = V.getSimpleValueType();
4370 unsigned NumElems = ShufVT.getVectorNumElements();
4371 SmallVector<int, 16> ShuffleMask;
4374 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
4377 int Elt = ShuffleMask[Index];
4379 return DAG.getUNDEF(ShufVT.getVectorElementType());
4381 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
4383 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
4387 // Actual nodes that may contain scalar elements
4388 if (Opcode == ISD::BITCAST) {
4389 V = V.getOperand(0);
4390 EVT SrcVT = V.getValueType();
4391 unsigned NumElems = VT.getVectorNumElements();
4393 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4397 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4398 return (Index == 0) ? V.getOperand(0)
4399 : DAG.getUNDEF(VT.getVectorElementType());
4401 if (V.getOpcode() == ISD::BUILD_VECTOR)
4402 return V.getOperand(Index);
4407 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
4409 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
4410 unsigned NumNonZero, unsigned NumZero,
4412 const X86Subtarget* Subtarget,
4413 const TargetLowering &TLI) {
4420 for (unsigned i = 0; i < 16; ++i) {
4421 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
4422 if (ThisIsNonZero && First) {
4424 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
4426 V = DAG.getUNDEF(MVT::v8i16);
4431 SDValue ThisElt, LastElt;
4432 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
4433 if (LastIsNonZero) {
4434 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
4435 MVT::i16, Op.getOperand(i-1));
4437 if (ThisIsNonZero) {
4438 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
4439 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
4440 ThisElt, DAG.getConstant(8, MVT::i8));
4442 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
4446 if (ThisElt.getNode())
4447 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
4448 DAG.getIntPtrConstant(i/2));
4452 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
4455 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
4457 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
4458 unsigned NumNonZero, unsigned NumZero,
4460 const X86Subtarget* Subtarget,
4461 const TargetLowering &TLI) {
4468 for (unsigned i = 0; i < 8; ++i) {
4469 bool isNonZero = (NonZeros & (1 << i)) != 0;
4473 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
4475 V = DAG.getUNDEF(MVT::v8i16);
4478 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
4479 MVT::v8i16, V, Op.getOperand(i),
4480 DAG.getIntPtrConstant(i));
4487 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
4488 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
4489 const X86Subtarget *Subtarget,
4490 const TargetLowering &TLI) {
4491 // Find all zeroable elements.
4492 std::bitset<4> Zeroable;
4493 for (int i=0; i < 4; ++i) {
4494 SDValue Elt = Op->getOperand(i);
4495 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
4497 assert(Zeroable.size() - Zeroable.count() > 1 &&
4498 "We expect at least two non-zero elements!");
4500 // We only know how to deal with build_vector nodes where elements are either
4501 // zeroable or extract_vector_elt with constant index.
4502 SDValue FirstNonZero;
4503 unsigned FirstNonZeroIdx;
4504 for (unsigned i=0; i < 4; ++i) {
4507 SDValue Elt = Op->getOperand(i);
4508 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4509 !isa<ConstantSDNode>(Elt.getOperand(1)))
4511 // Make sure that this node is extracting from a 128-bit vector.
4512 MVT VT = Elt.getOperand(0).getSimpleValueType();
4513 if (!VT.is128BitVector())
4515 if (!FirstNonZero.getNode()) {
4517 FirstNonZeroIdx = i;
4521 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
4522 SDValue V1 = FirstNonZero.getOperand(0);
4523 MVT VT = V1.getSimpleValueType();
4525 // See if this build_vector can be lowered as a blend with zero.
4527 unsigned EltMaskIdx, EltIdx;
4529 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
4530 if (Zeroable[EltIdx]) {
4531 // The zero vector will be on the right hand side.
4532 Mask[EltIdx] = EltIdx+4;
4536 Elt = Op->getOperand(EltIdx);
4537 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
4538 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
4539 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
4541 Mask[EltIdx] = EltIdx;
4545 // Let the shuffle legalizer deal with blend operations.
4546 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
4547 if (V1.getSimpleValueType() != VT)
4548 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
4549 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
4552   // See if we can lower this build_vector to an INSERTPS.
4553 if (!Subtarget->hasSSE41())
4556 SDValue V2 = Elt.getOperand(0);
4557 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
4560 bool CanFold = true;
4561 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
4565 SDValue Current = Op->getOperand(i);
4566 SDValue SrcVector = Current->getOperand(0);
4569 CanFold = SrcVector == V1 &&
4570 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
4576 assert(V1.getNode() && "Expected at least two non-zero elements!");
4577 if (V1.getSimpleValueType() != MVT::v4f32)
4578 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
4579 if (V2.getSimpleValueType() != MVT::v4f32)
4580 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
4582 // Ok, we can emit an INSERTPS instruction.
4583 unsigned ZMask = Zeroable.to_ulong();
4585 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
4586 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
4587 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
4588 DAG.getIntPtrConstant(InsertPSMask));
4589 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
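// For example, with EltIdx = 1 (destination lane), EltMaskIdx = 2 (source
// lane of V2) and only element 3 zeroable (ZMask = 0b1000), the immediate is
// (2 << 6) | (1 << 4) | 0x8 = 0x98: INSERTPS copies V2[2] into lane 1 of V1
// and clears lane 3, following the imm8 layout count_s[7:6], count_d[5:4],
// zmask[3:0].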
4592 /// Return a vector logical shift node.
4593 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
4594 unsigned NumBits, SelectionDAG &DAG,
4595 const TargetLowering &TLI, SDLoc dl) {
4596 assert(VT.is128BitVector() && "Unknown type for VShift");
4597 MVT ShVT = MVT::v2i64;
4598 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
4599 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
4600 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
4601 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
4602 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
4603 return DAG.getNode(ISD::BITCAST, dl, VT,
4604 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
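// For example, a logical left shift of a 128-bit vector by 32 bits is emitted
// here as a bitcast to v2i64 plus X86ISD::VSHLDQ with a byte count of 4
// (i.e. PSLLDQ $4), which roughly turns a v4i32 <a, b, c, d> into
// <0, a, b, c> since element 0 occupies the least significant bytes.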
4608 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
4610 // Check if the scalar load can be widened into a vector load. And if
4611 // the address is "base + cst" see if the cst can be "absorbed" into
4612 // the shuffle mask.
4613 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
4614 SDValue Ptr = LD->getBasePtr();
4615 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
4617 EVT PVT = LD->getValueType(0);
4618 if (PVT != MVT::i32 && PVT != MVT::f32)
4623 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
4624 FI = FINode->getIndex();
4626 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
4627 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
4628 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4629 Offset = Ptr.getConstantOperandVal(1);
4630 Ptr = Ptr.getOperand(0);
4635   // FIXME: 256-bit vector instructions don't require strict alignment;
4636   // improve this code to support it better.
4637 unsigned RequiredAlign = VT.getSizeInBits()/8;
4638 SDValue Chain = LD->getChain();
4639 // Make sure the stack object alignment is at least 16 or 32.
4640 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
4641 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
4642 if (MFI->isFixedObjectIndex(FI)) {
4643       // Can't change the alignment. FIXME: It's possible to compute
4644       // the exact stack offset and reference FI + adjusted offset instead,
4645       // if someone *really* cares about this; that's the way to implement it.
4648 MFI->setObjectAlignment(FI, RequiredAlign);
4652     // (Offset % 16 or 32) must be a multiple of 4. The address is then
4653 // Ptr + (Offset & ~15).
4656 if ((Offset % RequiredAlign) & 3)
4658 int64_t StartOffset = Offset & ~(RequiredAlign-1);
4660 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
4661 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
4663 int EltNo = (Offset - StartOffset) >> 2;
4664 unsigned NumElems = VT.getVectorNumElements();
4666 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
4667 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
4668 LD->getPointerInfo().getWithOffset(StartOffset),
4669 false, false, false, 0);
4671 SmallVector<int, 8> Mask(NumElems, EltNo);
4673 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
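// As a concrete example: splatting an i32 loaded from a 16-byte aligned stack
// slot at offset 8 passes the (Offset % RequiredAlign) & 3 check, computes
// StartOffset = 8 & ~15 = 0 and EltNo = (8 - 0) >> 2 = 2, so the scalar load
// is widened into a v4i32 load of the whole slot and element 2 is splatted
// via the shuffle mask <2, 2, 2, 2>.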
4679 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
4680 /// elements can be replaced by a single large load which has the same value as
4681 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
4683 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
4685 /// FIXME: we'd also like to handle the case where the last elements are zero
4686 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
4687 /// There's even a handy isZeroNode for that purpose.
4688 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
4689 SDLoc &DL, SelectionDAG &DAG,
4690 bool isAfterLegalize) {
4691 unsigned NumElems = Elts.size();
4693 LoadSDNode *LDBase = nullptr;
4694 unsigned LastLoadedElt = -1U;
4696 // For each element in the initializer, see if we've found a load or an undef.
4697 // If we don't find an initial load element, or later load elements are
4698 // non-consecutive, bail out.
4699 for (unsigned i = 0; i < NumElems; ++i) {
4700 SDValue Elt = Elts[i];
4701 // Look through a bitcast.
4702 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
4703 Elt = Elt.getOperand(0);
4704 if (!Elt.getNode() ||
4705 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
4708 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
4710 LDBase = cast<LoadSDNode>(Elt.getNode());
4714 if (Elt.getOpcode() == ISD::UNDEF)
4717 LoadSDNode *LD = cast<LoadSDNode>(Elt);
4718 EVT LdVT = Elt.getValueType();
4719 // Each loaded element must be the correct fractional portion of the
4720 // requested vector load.
4721 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
4723 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
4728 // If we have found an entire vector of loads and undefs, then return a large
4729 // load of the entire vector width starting at the base pointer. If we found
4730 // consecutive loads for the low half, generate a vzext_load node.
4731 if (LastLoadedElt == NumElems - 1) {
4732 assert(LDBase && "Did not find base load for merging consecutive loads");
4733 EVT EltVT = LDBase->getValueType(0);
4734 // Ensure that the input vector size for the merged loads matches the
4735 // cumulative size of the input elements.
4736 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
4739 if (isAfterLegalize &&
4740 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
4743 SDValue NewLd = SDValue();
4745 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
4746 LDBase->getPointerInfo(), LDBase->isVolatile(),
4747 LDBase->isNonTemporal(), LDBase->isInvariant(),
4748 LDBase->getAlignment());
4750 if (LDBase->hasAnyUseOfValue(1)) {
4751 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
4753 SDValue(NewLd.getNode(), 1));
4754 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
4755 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
4756 SDValue(NewLd.getNode(), 1));
4762   // TODO: The code below fires only for loading the low v2i32 / v2f32
4763 //of a v4i32 / v4f32. It's probably worth generalizing.
4764 EVT EltVT = VT.getVectorElementType();
4765 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
4766 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
4767 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
4768 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
4770 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
4771 LDBase->getPointerInfo(),
4772 LDBase->getAlignment(),
4773 false/*isVolatile*/, true/*ReadMem*/,
4776 // Make sure the newly-created LOAD is in the same position as LDBase in
4777 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
4778 // update uses of LDBase's output chain to use the TokenFactor.
4779 if (LDBase->hasAnyUseOfValue(1)) {
4780 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
4781 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
4782 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
4783 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
4784 SDValue(ResNode.getNode(), 1));
4787 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
4792 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
4793 /// to generate a splat value for the following cases:
4794 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
4795 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
4796 /// a scalar load, or a constant.
4797 /// The VBROADCAST node is returned when a pattern is found,
4798 /// or SDValue() otherwise.
4799 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
4800 SelectionDAG &DAG) {
4801 // VBROADCAST requires AVX.
4802 // TODO: Splats could be generated for non-AVX CPUs using SSE
4803 // instructions, but there's less potential gain for only 128-bit vectors.
4804 if (!Subtarget->hasAVX())
4807 MVT VT = Op.getSimpleValueType();
4810 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
4811 "Unsupported vector type for broadcast.");
4816 switch (Op.getOpcode()) {
4818 // Unknown pattern found.
4821 case ISD::BUILD_VECTOR: {
4822 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
4823 BitVector UndefElements;
4824 SDValue Splat = BVOp->getSplatValue(&UndefElements);
4826 // We need a splat of a single value to use broadcast, and it doesn't
4827 // make any sense if the value is only in one element of the vector.
4828 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
4832 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
4833 Ld.getOpcode() == ISD::ConstantFP);
4835 // Make sure that all of the users of a non-constant load are from the
4836 // BUILD_VECTOR node.
4837 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
4842 case ISD::VECTOR_SHUFFLE: {
4843 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
4845     // Shuffles must have a splat mask where the first element is broadcasted.
4847 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
4850 SDValue Sc = Op.getOperand(0);
4851 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
4852 Sc.getOpcode() != ISD::BUILD_VECTOR) {
4854 if (!Subtarget->hasInt256())
4857 // Use the register form of the broadcast instruction available on AVX2.
4858 if (VT.getSizeInBits() >= 256)
4859 Sc = Extract128BitVector(Sc, 0, DAG, dl);
4860 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
4863 Ld = Sc.getOperand(0);
4864 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
4865 Ld.getOpcode() == ISD::ConstantFP);
4867 // The scalar_to_vector node and the suspected
4868 // load node must have exactly one user.
4869 // Constants may have multiple users.
4871       // AVX-512 has a register version of the broadcast
4872 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
4873 Ld.getValueType().getSizeInBits() >= 32;
4874 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
4881 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
4882 bool IsGE256 = (VT.getSizeInBits() >= 256);
4884 // When optimizing for size, generate up to 5 extra bytes for a broadcast
4885 // instruction to save 8 or more bytes of constant pool data.
4886 // TODO: If multiple splats are generated to load the same constant,
4887 // it may be detrimental to overall size. There needs to be a way to detect
4888 // that condition to know if this is truly a size win.
4889 const Function *F = DAG.getMachineFunction().getFunction();
4890 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
4892   // Handle broadcasting a single constant scalar from the constant pool
       // into a vector.
4894 // On Sandybridge (no AVX2), it is still better to load a constant vector
4895 // from the constant pool and not to broadcast it from a scalar.
4896 // But override that restriction when optimizing for size.
4897 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
4898 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
4899 EVT CVT = Ld.getValueType();
4900 assert(!CVT.isVector() && "Must not broadcast a vector type");
4902 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
4903 // For size optimization, also splat v2f64 and v2i64, and for size opt
4904 // with AVX2, also splat i8 and i16.
4905 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
4906 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
4907 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
4908 const Constant *C = nullptr;
4909 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
4910 C = CI->getConstantIntValue();
4911 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
4912 C = CF->getConstantFPValue();
4914 assert(C && "Invalid constant type");
4916 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4917 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
4918 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
4919 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
4920 MachinePointerInfo::getConstantPool(),
4921 false, false, false, Alignment);
4923 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
4927 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
4929 // Handle AVX2 in-register broadcasts.
4930 if (!IsLoad && Subtarget->hasInt256() &&
4931 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
4932 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
4934 // The scalar source must be a normal load.
4938 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
4939 (Subtarget->hasVLX() && ScalarSize == 64))
4940 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
4942   // The integer check is needed for the 64-bit into 128-bit broadcast, so that
4943   // it doesn't match double, since there is no vbroadcastsd xmm instruction.
4944 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
4945 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
4946 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
4949 // Unsupported broadcast.
4953 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
4954 /// underlying vector and index.
4956 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
4958 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
4960 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
4961 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
4964   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already turned
4966   //   (extract_vector_elt (v8f32 %vreg1), Constant<6>)     into:
4968   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
4969   //        (extract_subvector (v8f32 %vreg0), Constant<4>), undef), Constant<2>)
4972 // In this case the vector is the extract_subvector expression and the index
4973 // is 2, as specified by the shuffle.
4974 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
4975 SDValue ShuffleVec = SVOp->getOperand(0);
4976 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
4977 assert(ShuffleVecVT.getVectorElementType() ==
4978 ExtractedFromVec.getSimpleValueType().getVectorElementType());
4980 int ShuffleIdx = SVOp->getMaskElt(Idx);
4981 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
4982 ExtractedFromVec = ShuffleVec;
4988 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
4989 MVT VT = Op.getSimpleValueType();
4991 // Skip if insert_vec_elt is not supported.
4992 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4993 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
4997 unsigned NumElems = Op.getNumOperands();
5001 SmallVector<unsigned, 4> InsertIndices;
5002 SmallVector<int, 8> Mask(NumElems, -1);
5004 for (unsigned i = 0; i != NumElems; ++i) {
5005 unsigned Opc = Op.getOperand(i).getOpcode();
5007 if (Opc == ISD::UNDEF)
5010 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
5011       // Quit if more than 1 element needs inserting.
5012 if (InsertIndices.size() > 1)
5015 InsertIndices.push_back(i);
5019 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
5020 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
5021 // Quit if non-constant index.
5022 if (!isa<ConstantSDNode>(ExtIdx))
5024 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
5026 // Quit if extracted from vector of different type.
5027 if (ExtractedFromVec.getValueType() != VT)
5030 if (!VecIn1.getNode())
5031 VecIn1 = ExtractedFromVec;
5032 else if (VecIn1 != ExtractedFromVec) {
5033 if (!VecIn2.getNode())
5034 VecIn2 = ExtractedFromVec;
5035 else if (VecIn2 != ExtractedFromVec)
5036 // Quit if more than 2 vectors to shuffle
5040 if (ExtractedFromVec == VecIn1)
5042 else if (ExtractedFromVec == VecIn2)
5043 Mask[i] = Idx + NumElems;
5046 if (!VecIn1.getNode())
5049 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
5050 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
5051 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
5052 unsigned Idx = InsertIndices[i];
5053 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
5054 DAG.getIntPtrConstant(Idx));
5060 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
5062 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
5064 MVT VT = Op.getSimpleValueType();
5065 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
5066 "Unexpected type in LowerBUILD_VECTORvXi1!");
5069 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
5070 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
5071 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5072 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5075 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
5076 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
5077 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5078 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5081   bool AllConstants = true;
5082 uint64_t Immediate = 0;
5083 int NonConstIdx = -1;
5084 bool IsSplat = true;
5085 unsigned NumNonConsts = 0;
5086 unsigned NumConsts = 0;
5087 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
5088 SDValue In = Op.getOperand(idx);
5089 if (In.getOpcode() == ISD::UNDEF)
5091 if (!isa<ConstantSDNode>(In)) {
5092       AllConstants = false;
5097 if (cast<ConstantSDNode>(In)->getZExtValue())
5098 Immediate |= (1ULL << idx);
5100 if (In != Op.getOperand(0))
5105 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
5106 DAG.getConstant(Immediate, MVT::i16));
5107 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
5108 DAG.getIntPtrConstant(0));
5111 if (NumNonConsts == 1 && NonConstIdx != 0) {
5114 SDValue VecAsImm = DAG.getConstant(Immediate,
5115 MVT::getIntegerVT(VT.getSizeInBits()));
5116 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
5119 DstVec = DAG.getUNDEF(VT);
5120 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
5121 Op.getOperand(NonConstIdx),
5122 DAG.getIntPtrConstant(NonConstIdx));
5124 if (!IsSplat && (NonConstIdx != 0))
5125 llvm_unreachable("Unsupported BUILD_VECTOR operation");
5126 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
5129 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
5130 DAG.getConstant(-1, SelectVT),
5131 DAG.getConstant(0, SelectVT));
5133 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
5134 DAG.getConstant((Immediate | 1), SelectVT),
5135 DAG.getConstant(Immediate, SelectVT));
5136 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
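// For example, an all-constant v8i1 build_vector <1, 0, 1, 1, 0, 0, 0, 0>
// accumulates Immediate = (1 << 0) | (1 << 2) | (1 << 3) = 0xD above, so the
// mask is materialized as a single integer constant and bitcast back to the
// predicate vector type instead of being built element by element.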
5139 /// \brief Return true if \p N implements a horizontal binop and return the
5140 /// operands of the horizontal binop in V0 and V1.
5142 /// This is a helper function of PerformBUILD_VECTORCombine.
5143 /// This function checks that the input build_vector \p N implements a
5144 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
5145 /// operation to match.
5146 /// For example, if \p Opcode is equal to ISD::ADD, then this function
5147 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
5148 /// is equal to ISD::SUB, then this function checks if this is a horizontal
     /// arithmetic sub.
5151 /// This function only analyzes elements of \p N whose indices are
5152 /// in range [BaseIdx, LastIdx).
5153 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
5155 unsigned BaseIdx, unsigned LastIdx,
5156 SDValue &V0, SDValue &V1) {
5157 EVT VT = N->getValueType(0);
5159 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
5160 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
5161 "Invalid Vector in input!");
5163 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
5164 bool CanFold = true;
5165 unsigned ExpectedVExtractIdx = BaseIdx;
5166 unsigned NumElts = LastIdx - BaseIdx;
5167 V0 = DAG.getUNDEF(VT);
5168 V1 = DAG.getUNDEF(VT);
5170 // Check if N implements a horizontal binop.
5171 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
5172 SDValue Op = N->getOperand(i + BaseIdx);
5175 if (Op->getOpcode() == ISD::UNDEF) {
5176 // Update the expected vector extract index.
5177 if (i * 2 == NumElts)
5178 ExpectedVExtractIdx = BaseIdx;
5179 ExpectedVExtractIdx += 2;
5183 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
5188 SDValue Op0 = Op.getOperand(0);
5189 SDValue Op1 = Op.getOperand(1);
5191 // Try to match the following pattern:
5192 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
5193 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5194 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5195 Op0.getOperand(0) == Op1.getOperand(0) &&
5196 isa<ConstantSDNode>(Op0.getOperand(1)) &&
5197 isa<ConstantSDNode>(Op1.getOperand(1)));
5201 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
5202 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
5204 if (i * 2 < NumElts) {
5205 if (V0.getOpcode() == ISD::UNDEF)
5206 V0 = Op0.getOperand(0);
5208 if (V1.getOpcode() == ISD::UNDEF)
5209 V1 = Op0.getOperand(0);
5210 if (i * 2 == NumElts)
5211 ExpectedVExtractIdx = BaseIdx;
5214 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
5215 if (I0 == ExpectedVExtractIdx)
5216 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
5217 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
5218 // Try to match the following dag sequence:
5219 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
5220 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
5224 ExpectedVExtractIdx += 2;
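// For example, a v4i32 build_vector whose operands are
//   (add (extractelt A, 0), (extractelt A, 1)),
//   (add (extractelt A, 2), (extractelt A, 3)),
//   (add (extractelt B, 0), (extractelt B, 1)),
//   (add (extractelt B, 2), (extractelt B, 3))
// matches here with V0 = A and V1 = B, which the callers can then lower as a
// single X86ISD::HADD (i.e. PHADDD) of A and B.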
5230 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
5231 /// a concat_vector.
5233 /// This is a helper function of PerformBUILD_VECTORCombine.
5234 /// This function expects two 256-bit vectors called V0 and V1.
5235 /// At first, each vector is split into two separate 128-bit vectors.
5236 /// Then, the resulting 128-bit vectors are used to implement two
5237 /// horizontal binary operations.
5239 /// The kind of horizontal binary operation is defined by \p X86Opcode.
5241 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
5242 /// the two new horizontal binops.
5243 /// When Mode is set, the first horizontal binop dag node would take as input
5244 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
5245 /// horizontal binop dag node would take as input the lower 128-bit of V1
5246 /// and the upper 128-bit of V1.
5248 /// HADD V0_LO, V0_HI
5249 /// HADD V1_LO, V1_HI
5251 /// Otherwise, the first horizontal binop dag node takes as input the lower
5252 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
5253 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
5255 /// HADD V0_LO, V1_LO
5256 /// HADD V0_HI, V1_HI
5258 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
5259 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
5260 /// the upper 128-bits of the result.
5261 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
5262 SDLoc DL, SelectionDAG &DAG,
5263 unsigned X86Opcode, bool Mode,
5264 bool isUndefLO, bool isUndefHI) {
5265 EVT VT = V0.getValueType();
5266 assert(VT.is256BitVector() && VT == V1.getValueType() &&
5267 "Invalid nodes in input!");
5269 unsigned NumElts = VT.getVectorNumElements();
5270 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
5271 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
5272 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
5273 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
5274 EVT NewVT = V0_LO.getValueType();
5276 SDValue LO = DAG.getUNDEF(NewVT);
5277 SDValue HI = DAG.getUNDEF(NewVT);
5280 // Don't emit a horizontal binop if the result is expected to be UNDEF.
5281 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
5282 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
5283 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
5284 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
5286 // Don't emit a horizontal binop if the result is expected to be UNDEF.
5287 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
5288 V1_LO->getOpcode() != ISD::UNDEF))
5289 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
5291 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
5292 V1_HI->getOpcode() != ISD::UNDEF))
5293 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
5296 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
5299 /// \brief Try to fold a build_vector that performs an 'addsub' into the
5300 /// sequence of 'vadd + vsub + blendi'.
5301 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
5302 const X86Subtarget *Subtarget) {
5304 EVT VT = BV->getValueType(0);
5305 unsigned NumElts = VT.getVectorNumElements();
5306 SDValue InVec0 = DAG.getUNDEF(VT);
5307 SDValue InVec1 = DAG.getUNDEF(VT);
5309 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
5310 VT == MVT::v2f64) && "build_vector with an invalid type found!");
5312 // Odd-numbered elements in the input build vector are obtained from
5313 // adding two integer/float elements.
5314 // Even-numbered elements in the input build vector are obtained from
5315 // subtracting two integer/float elements.
5316 unsigned ExpectedOpcode = ISD::FSUB;
5317 unsigned NextExpectedOpcode = ISD::FADD;
5318 bool AddFound = false;
5319 bool SubFound = false;
5321 for (unsigned i = 0, e = NumElts; i != e; ++i) {
5322 SDValue Op = BV->getOperand(i);
5324 // Skip 'undef' values.
5325 unsigned Opcode = Op.getOpcode();
5326 if (Opcode == ISD::UNDEF) {
5327 std::swap(ExpectedOpcode, NextExpectedOpcode);
5331 // Early exit if we found an unexpected opcode.
5332 if (Opcode != ExpectedOpcode)
5335 SDValue Op0 = Op.getOperand(0);
5336 SDValue Op1 = Op.getOperand(1);
5338 // Try to match the following pattern:
5339 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
5340 // Early exit if we cannot match that sequence.
5341 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5342 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5343 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
5344 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
5345 Op0.getOperand(1) != Op1.getOperand(1))
5348 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
5352 // We found a valid add/sub node. Update the information accordingly.
5358 // Update InVec0 and InVec1.
5359 if (InVec0.getOpcode() == ISD::UNDEF)
5360 InVec0 = Op0.getOperand(0);
5361 if (InVec1.getOpcode() == ISD::UNDEF)
5362 InVec1 = Op1.getOperand(0);
5364 // Make sure that operands in input to each add/sub node always
5365     // come from the same pair of vectors.
5366 if (InVec0 != Op0.getOperand(0)) {
5367 if (ExpectedOpcode == ISD::FSUB)
5370 // FADD is commutable. Try to commute the operands
5371 // and then test again.
5372 std::swap(Op0, Op1);
5373 if (InVec0 != Op0.getOperand(0))
5377 if (InVec1 != Op1.getOperand(0))
5380 // Update the pair of expected opcodes.
5381 std::swap(ExpectedOpcode, NextExpectedOpcode);
5384 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
5385 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
5386 InVec1.getOpcode() != ISD::UNDEF)
5387 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
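// For example, a v4f32 build_vector of
//   (fsub (extractelt A, 0), (extractelt B, 0)),
//   (fadd (extractelt A, 1), (extractelt B, 1)),
//   (fsub (extractelt A, 2), (extractelt B, 2)),
//   (fadd (extractelt A, 3), (extractelt B, 3))
// is recognized here and folded into (X86ISD::ADDSUB A, B), which maps to a
// single ADDSUBPS instruction.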
5392 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
5393 const X86Subtarget *Subtarget) {
5395 EVT VT = N->getValueType(0);
5396 unsigned NumElts = VT.getVectorNumElements();
5397 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
5398 SDValue InVec0, InVec1;
5400 // Try to match an ADDSUB.
5401 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
5402 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
5403 SDValue Value = matchAddSub(BV, DAG, Subtarget);
5404 if (Value.getNode())
5408 // Try to match horizontal ADD/SUB.
5409 unsigned NumUndefsLO = 0;
5410 unsigned NumUndefsHI = 0;
5411 unsigned Half = NumElts/2;
5413   // Count the number of UNDEF operands in the input build_vector.
5414 for (unsigned i = 0, e = Half; i != e; ++i)
5415 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
5418 for (unsigned i = Half, e = NumElts; i != e; ++i)
5419 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
5422 // Early exit if this is either a build_vector of all UNDEFs or all the
5423 // operands but one are UNDEF.
5424 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
5427 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
5428 // Try to match an SSE3 float HADD/HSUB.
5429 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
5430 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
5432 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
5433 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
5434 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
5435 // Try to match an SSSE3 integer HADD/HSUB.
5436 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
5437 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
5439 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
5440 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
5443 if (!Subtarget->hasAVX())
5446 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
5447 // Try to match an AVX horizontal add/sub of packed single/double
5448 // precision floating point values from 256-bit vectors.
5449 SDValue InVec2, InVec3;
5450 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
5451 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
5452 ((InVec0.getOpcode() == ISD::UNDEF ||
5453 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
5454 ((InVec1.getOpcode() == ISD::UNDEF ||
5455 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
5456 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
5458 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
5459 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
5460 ((InVec0.getOpcode() == ISD::UNDEF ||
5461 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
5462 ((InVec1.getOpcode() == ISD::UNDEF ||
5463 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
5464 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
5465 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
5466 // Try to match an AVX2 horizontal add/sub of signed integers.
5467 SDValue InVec2, InVec3;
5469 bool CanFold = true;
5471 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
5472 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
5473 ((InVec0.getOpcode() == ISD::UNDEF ||
5474 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
5475 ((InVec1.getOpcode() == ISD::UNDEF ||
5476 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
5477 X86Opcode = X86ISD::HADD;
5478 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
5479 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
5480 ((InVec0.getOpcode() == ISD::UNDEF ||
5481 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
5482 ((InVec1.getOpcode() == ISD::UNDEF ||
5483 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
5484 X86Opcode = X86ISD::HSUB;
5489 // Fold this build_vector into a single horizontal add/sub.
5490 // Do this only if the target has AVX2.
5491 if (Subtarget->hasAVX2())
5492 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
5494 // Do not try to expand this build_vector into a pair of horizontal
5495 // add/sub if we can emit a pair of scalar add/sub.
5496 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
5499     // Convert this build_vector into a pair of horizontal binops followed by
         // a concat vector.
5501 bool isUndefLO = NumUndefsLO == Half;
5502 bool isUndefHI = NumUndefsHI == Half;
5503 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
5504 isUndefLO, isUndefHI);
5508 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
5509 VT == MVT::v16i16) && Subtarget->hasAVX()) {
5511 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
5512 X86Opcode = X86ISD::HADD;
5513 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
5514 X86Opcode = X86ISD::HSUB;
5515 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
5516 X86Opcode = X86ISD::FHADD;
5517 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
5518 X86Opcode = X86ISD::FHSUB;
5522 // Don't try to expand this build_vector into a pair of horizontal add/sub
5523 // if we can simply emit a pair of scalar add/sub.
5524 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
5527     // Convert this build_vector into two horizontal add/sub nodes followed by
         // a concat vector.
5529 bool isUndefLO = NumUndefsLO == Half;
5530 bool isUndefHI = NumUndefsHI == Half;
5531 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
5532 isUndefLO, isUndefHI);
5539 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
5542 MVT VT = Op.getSimpleValueType();
5543 MVT ExtVT = VT.getVectorElementType();
5544 unsigned NumElems = Op.getNumOperands();
5546 // Generate vectors for predicate vectors.
5547 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
5548 return LowerBUILD_VECTORvXi1(Op, DAG);
5550 // Vectors containing all zeros can be matched by pxor and xorps later
5551 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
5552 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
5553 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
5554 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
5557 return getZeroVector(VT, Subtarget, DAG, dl);
5560 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
5561 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
5562 // vpcmpeqd on 256-bit vectors.
5563 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
5564 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
5567 if (!VT.is512BitVector())
5568 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
5571 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
5572 if (Broadcast.getNode())
5575 unsigned EVTBits = ExtVT.getSizeInBits();
5577 unsigned NumZero = 0;
5578 unsigned NumNonZero = 0;
5579 unsigned NonZeros = 0;
5580 bool IsAllConstants = true;
5581 SmallSet<SDValue, 8> Values;
5582 for (unsigned i = 0; i < NumElems; ++i) {
5583 SDValue Elt = Op.getOperand(i);
5584 if (Elt.getOpcode() == ISD::UNDEF)
5587 if (Elt.getOpcode() != ISD::Constant &&
5588 Elt.getOpcode() != ISD::ConstantFP)
5589 IsAllConstants = false;
5590 if (X86::isZeroNode(Elt))
5593 NonZeros |= (1 << i);
5598 // All undef vector. Return an UNDEF. All zero vectors were handled above.
5599 if (NumNonZero == 0)
5600 return DAG.getUNDEF(VT);
5602 // Special case for single non-zero, non-undef, element.
5603 if (NumNonZero == 1) {
5604 unsigned Idx = countTrailingZeros(NonZeros);
5605 SDValue Item = Op.getOperand(Idx);
5607 // If this is an insertion of an i64 value on x86-32, and if the top bits of
5608 // the value are obviously zero, truncate the value to i32 and do the
5609 // insertion that way. Only do this if the value is non-constant or if the
5610 // value is a constant being inserted into element 0. It is cheaper to do
5611 // a constant pool load than it is to do a movd + shuffle.
5612 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
5613 (!IsAllConstants || Idx == 0)) {
5614 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
5616 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
5617 EVT VecVT = MVT::v4i32;
5619 // Truncate the value (which may itself be a constant) to i32, and
5620 // convert it to a vector with movd (S2V+shuffle to zero extend).
5621 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
5622 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
5624 ISD::BITCAST, dl, VT,
5625 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
5629 // If we have a constant or non-constant insertion into the low element of
5630 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
5631 // the rest of the elements. This will be matched as movd/movq/movss/movsd
5632 // depending on what the source datatype is.
5635 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5637 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
5638 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
5639 if (VT.is256BitVector() || VT.is512BitVector()) {
5640 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
5641 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
5642 Item, DAG.getIntPtrConstant(0));
5644 assert(VT.is128BitVector() && "Expected an SSE value type!");
5645 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5646 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
5647 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
5650 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
5651 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
5652 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
5653 if (VT.is256BitVector()) {
5654 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
5655 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
5657 assert(VT.is128BitVector() && "Expected an SSE value type!");
5658 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
5660 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
5664 // Is it a vector logical left shift?
5665 if (NumElems == 2 && Idx == 1 &&
5666 X86::isZeroNode(Op.getOperand(0)) &&
5667 !X86::isZeroNode(Op.getOperand(1))) {
5668 unsigned NumBits = VT.getSizeInBits();
5669 return getVShift(true, VT,
5670 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
5671 VT, Op.getOperand(1)),
5672 NumBits/2, DAG, *this, dl);
5675 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
5678 // Otherwise, if this is a vector with i32 or f32 elements, and the element
5679 // is a non-constant being inserted into an element other than the low one,
5680 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
5681     // movd/movss) to move this into the low element, then shuffle it into place.
5683 if (EVTBits == 32) {
5684 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5685 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
5689 // Splat is obviously ok. Let legalizer expand it to a shuffle.
5690 if (Values.size() == 1) {
5691 if (EVTBits == 32) {
5692 // Instead of a shuffle like this:
5693 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
5694 // Check if it's possible to issue this instead.
5695       // shuffle (vload ptr), undef, <1, 1, 1, 1>
5696 unsigned Idx = countTrailingZeros(NonZeros);
5697 SDValue Item = Op.getOperand(Idx);
5698 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
5699 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
5704 // A vector full of immediates; various special cases are already
5705 // handled, so this is best done with a single constant-pool load.
5709 // For AVX-length vectors, see if we can use a vector load to get all of the
5710 // elements, otherwise build the individual 128-bit pieces and use
5711 // shuffles to put them in place.
5712 if (VT.is256BitVector() || VT.is512BitVector()) {
5713 SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
5715 // Check for a build vector of consecutive loads.
5716 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
5719 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
5721 // Build both the lower and upper subvector.
5722 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
5723 makeArrayRef(&V[0], NumElems/2));
5724 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
5725 makeArrayRef(&V[NumElems / 2], NumElems/2));
5727 // Recreate the wider vector with the lower and upper part.
5728 if (VT.is256BitVector())
5729 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
5730 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
5733 // Let legalizer expand 2-wide build_vectors.
5734 if (EVTBits == 64) {
5735 if (NumNonZero == 1) {
5736 // One half is zero or undef.
5737 unsigned Idx = countTrailingZeros(NonZeros);
5738 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
5739 Op.getOperand(Idx));
5740 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
5745 // If element VT is < 32 bits, convert it to inserts into a zero vector.
5746 if (EVTBits == 8 && NumElems == 16) {
5747 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
5749 if (V.getNode()) return V;
5752 if (EVTBits == 16 && NumElems == 8) {
5753 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
5755 if (V.getNode()) return V;
5758 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
5759 if (EVTBits == 32 && NumElems == 4) {
5760 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
5765 // If element VT is == 32 bits, turn it into a number of shuffles.
5766 SmallVector<SDValue, 8> V(NumElems);
5767 if (NumElems == 4 && NumZero > 0) {
5768 for (unsigned i = 0; i < 4; ++i) {
5769 bool isZero = !(NonZeros & (1 << i));
5771 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
5773 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
5776 for (unsigned i = 0; i < 2; ++i) {
5777 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
5780 V[i] = V[i*2]; // Must be a zero vector.
5783 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
5786 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
5789 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
5794 bool Reverse1 = (NonZeros & 0x3) == 2;
5795 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
5799 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
5800 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
5802 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
5805 if (Values.size() > 1 && VT.is128BitVector()) {
5806 // Check for a build vector of consecutive loads.
5807 for (unsigned i = 0; i < NumElems; ++i)
5808 V[i] = Op.getOperand(i);
5810 // Check for elements which are consecutive loads.
5811 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
5815     // Check for a build_vector that is mostly a shuffle plus a few insertions.
5816 SDValue Sh = buildFromShuffleMostly(Op, DAG);
5820 // For SSE 4.1, use insertps to put the high elements into the low element.
5821 if (Subtarget->hasSSE41()) {
5823 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
5824 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
5826 Result = DAG.getUNDEF(VT);
5828 for (unsigned i = 1; i < NumElems; ++i) {
5829 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
5830 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
5831 Op.getOperand(i), DAG.getIntPtrConstant(i));
5836 // Otherwise, expand into a number of unpckl*, start by extending each of
5837 // our (non-undef) elements to the full vector width with the element in the
5838 // bottom slot of the vector (which generates no code for SSE).
5839 for (unsigned i = 0; i < NumElems; ++i) {
5840 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
5841 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
5843 V[i] = DAG.getUNDEF(VT);
5846 // Next, we iteratively mix elements, e.g. for v4f32:
5847 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
5848 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
5849 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
5850 unsigned EltStride = NumElems >> 1;
5851 while (EltStride != 0) {
5852 for (unsigned i = 0; i < EltStride; ++i) {
5853 // If V[i+EltStride] is undef and this is the first round of mixing,
5854 // then it is safe to just drop this shuffle: V[i] is already in the
5855 // right place, the one element (since it's the first round) being
5856 // inserted as undef can be dropped. This isn't safe for successive
5857 // rounds because they will permute elements within both vectors.
5858 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
5859 EltStride == NumElems/2)
5862 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
5871 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
5872 // to create 256-bit vectors from two other 128-bit ones.
5873 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
5875 MVT ResVT = Op.getSimpleValueType();
5877 assert((ResVT.is256BitVector() ||
5878 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
5880 SDValue V1 = Op.getOperand(0);
5881 SDValue V2 = Op.getOperand(1);
5882 unsigned NumElems = ResVT.getVectorNumElements();
5883   if (ResVT.is256BitVector())
5884 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
5886 if (Op.getNumOperands() == 4) {
5887 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
5888 ResVT.getVectorNumElements()/2);
5889 SDValue V3 = Op.getOperand(2);
5890 SDValue V4 = Op.getOperand(3);
5891 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
5892 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
5894 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
5897 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
5898 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
5899 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
5900 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
5901 Op.getNumOperands() == 4)));
5903 // AVX can use the vinsertf128 instruction to create 256-bit vectors
5904 // from two other 128-bit ones.
5906   // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
5907 return LowerAVXCONCAT_VECTORS(Op, DAG);
5911 //===----------------------------------------------------------------------===//
5912 // Vector shuffle lowering
5914 // This is an experimental code path for lowering vector shuffles on x86. It is
5915 // designed to handle arbitrary vector shuffles and blends, gracefully
5916 // degrading performance as necessary. It works hard to recognize idiomatic
5917 // shuffles and lower them to optimal instruction patterns without leaving
5918 // a framework that allows reasonably efficient handling of all vector shuffle
     // patterns.
5920 //===----------------------------------------------------------------------===//
5922 /// \brief Tiny helper function to identify a no-op mask.
5924 /// This is a somewhat boring predicate function. It checks whether the mask
5925 /// array input, which is assumed to be a single-input shuffle mask of the kind
5926 /// used by the X86 shuffle instructions (not a fully general
5927 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
5928 /// in-place shuffle are 'no-op's.
5929 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
5930 for (int i = 0, Size = Mask.size(); i < Size; ++i)
5931 if (Mask[i] != -1 && Mask[i] != i)
5936 /// \brief Helper function to classify a mask as a single-input mask.
5938 /// This isn't a generic single-input test because in the vector shuffle
5939 /// lowering we canonicalize single inputs to be the first input operand. This
5940 /// means we can more quickly test for a single input by only checking whether
5941 /// an input from the second operand exists. We also assume that the size of
5942 /// the mask corresponds to the size of the input vectors, which isn't true in the
5943 /// fully general case.
5944 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
5946 if (M >= (int)Mask.size())
5951 /// \brief Test whether there are elements crossing 128-bit lanes in this
     /// shuffle mask.
5954 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
5955 /// and we routinely test for these.
5956 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
5957 int LaneSize = 128 / VT.getScalarSizeInBits();
5958 int Size = Mask.size();
5959 for (int i = 0; i < Size; ++i)
5960 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
5965 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
5967 /// This checks a shuffle mask to see if it is performing the same
5968 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
5969 /// that it is also not lane-crossing. It may however involve a blend from the
5970 /// same lane of a second vector.
5972 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
5973 /// non-trivial to compute in the face of undef lanes. The representation is
5974 /// *not* suitable for use with existing 128-bit shuffles as it will contain
5975 /// entries from both V1 and V2 inputs to the wider mask.
5977 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
5978 SmallVectorImpl<int> &RepeatedMask) {
5979 int LaneSize = 128 / VT.getScalarSizeInBits();
5980 RepeatedMask.resize(LaneSize, -1);
5981 int Size = Mask.size();
5982 for (int i = 0; i < Size; ++i) {
5985 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
5986 // This entry crosses lanes, so there is no way to model this shuffle.
5989 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
5990 if (RepeatedMask[i % LaneSize] == -1)
5991 // This is the first non-undef entry in this slot of a 128-bit lane.
5992 RepeatedMask[i % LaneSize] =
5993 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
5994 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
5995 // Found a mismatch with the repeated mask.
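// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> performs the same
// in-lane swap in both 128-bit halves and yields the repeated mask
// <1, 0, 3, 2>, whereas a mask such as <0, 4, 1, 5, ...> that pulls elements
// across the 128-bit boundary is rejected.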
6001 /// \brief Base case helper for testing a single mask element.
6002 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
6003 BuildVectorSDNode *BV1,
6004 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
6006 int Size = Mask.size();
6007 if (Mask[i] != -1 && Mask[i] != Arg) {
6008 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
6009 auto *ArgsBV = Arg < Size ? BV1 : BV2;
6010 if (!MaskBV || !ArgsBV ||
6011 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
6017 /// \brief Recursive helper to peel off and test each mask element.
6018 template <typename... Ts>
6019 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
6020 BuildVectorSDNode *BV1,
6021 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
6022 int i, int Arg, Ts... Args) {
6023 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
6026 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
6029 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
6032 /// This is a fast way to test a shuffle mask against a fixed pattern:
6034 ///   if (isShuffleEquivalent(V1, V2, Mask, 3, 2, 1, 0)) { ... }
6036 /// It returns true if the mask is exactly as wide as the argument list, and
6037 /// each element of the mask is either -1 (signifying undef) or the value given
6038 /// in the argument.
6039 template <typename... Ts>
6040 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
6042 if (Mask.size() != sizeof...(Args))
6045 // If the values are build vectors, we can look through them to find
6046 // equivalent inputs that make the shuffles equivalent.
6047 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
6048 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
6050 // Recursively peel off arguments and test them against the mask.
6051 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
6054 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
6056 /// This helper function produces an 8-bit shuffle immediate corresponding to
6057 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
6058 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
     /// example.
6061 /// NB: We rely heavily on "undef" masks preserving the input lane.
6062 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
6063 SelectionDAG &DAG) {
6064 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
6065 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
6066 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
6067 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
6068 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
6071 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
6072 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
6073 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
6074 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
6075 return DAG.getConstant(Imm, MVT::i8);
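// For example, the reversal mask <3, 2, 1, 0> encodes to
// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, the familiar immediate used with
// PSHUFD/SHUFPS-style instructions, where each 2-bit field selects the source
// lane for the corresponding destination lane.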
6078 /// \brief Try to emit a blend instruction for a shuffle using bit math.
6080 /// This is used as a fallback approach when first class blend instructions are
6081 /// unavailable. Currently it is only suitable for integer vectors, but could
6082 /// be generalized for floating point vectors if desirable.
6083 static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
6084 SDValue V2, ArrayRef<int> Mask,
6085 SelectionDAG &DAG) {
6086 assert(VT.isInteger() && "Only supports integer vector types!");
6087 MVT EltVT = VT.getScalarType();
6088 int NumEltBits = EltVT.getSizeInBits();
6089 SDValue Zero = DAG.getConstant(0, EltVT);
6090 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
6091 SmallVector<SDValue, 16> MaskOps;
6092 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6093 if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
6094 return SDValue(); // Shuffled input!
6095 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
6098 SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
6099 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
6100 // We have to cast V2 around.
6101 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
6102 V2 = DAG.getNode(ISD::BITCAST, DL, VT,
6103 DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
6104 DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
6105 DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
6106 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
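// For example, the v4i32 blend mask <0, 5, 2, 7> builds
// V1Mask = <-1, 0, -1, 0>, so the result is computed as
// (V1 & V1Mask) | (V2 & ~V1Mask) using AND, ANDNP and OR, taking lanes 0 and
// 2 from V1 and lanes 1 and 3 from V2 without a dedicated blend instruction.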
6109 /// \brief Try to emit a blend instruction for a shuffle.
6111 /// This doesn't do any checks for the availability of instructions for blending
6112 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
6113 /// be matched in the backend with the type given. What it does check for is
6114 /// that the shuffle mask is in fact a blend.
6115 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
6116 SDValue V2, ArrayRef<int> Mask,
6117 const X86Subtarget *Subtarget,
6118 SelectionDAG &DAG) {
6119 unsigned BlendMask = 0;
6120 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6121 if (Mask[i] >= Size) {
6122 if (Mask[i] != i + Size)
6123 return SDValue(); // Shuffled V2 input!
6124 BlendMask |= 1u << i;
6127 if (Mask[i] >= 0 && Mask[i] != i)
6128 return SDValue(); // Shuffled V1 input!
6130 switch (VT.SimpleTy) {
6135 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
6136 DAG.getConstant(BlendMask, MVT::i8));
6140 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
6144 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
6145 // that instruction.
6146 if (Subtarget->hasAVX2()) {
6147 // Scale the blend by the number of 32-bit dwords per element.
6148 int Scale = VT.getScalarSizeInBits() / 32;
6150 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6151 if (Mask[i] >= Size)
6152 for (int j = 0; j < Scale; ++j)
6153 BlendMask |= 1u << (i * Scale + j);
6155 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
6156 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
6157 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
6158 return DAG.getNode(ISD::BITCAST, DL, VT,
6159 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
6160 DAG.getConstant(BlendMask, MVT::i8)));
6161 }
6162 // FALLTHROUGH
6163 case MVT::v8i16: {
6164 // For integer shuffles we need to expand the mask and cast the inputs to
6165 // v8i16s prior to blending.
6166 int Scale = 8 / VT.getVectorNumElements();
6167 BlendMask = 0;
6168 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6169 if (Mask[i] >= Size)
6170 for (int j = 0; j < Scale; ++j)
6171 BlendMask |= 1u << (i * Scale + j);
6173 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
6174 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
6175 return DAG.getNode(ISD::BITCAST, DL, VT,
6176 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
6177 DAG.getConstant(BlendMask, MVT::i8)));
6178 }
6180 case MVT::v16i16: {
6181 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
6182 SmallVector<int, 8> RepeatedMask;
6183 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
6184 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
6185 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
6186 BlendMask = 0;
6187 for (int i = 0; i < 8; ++i)
6188 if (RepeatedMask[i] >= 16)
6189 BlendMask |= 1u << i;
6190 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
6191 DAG.getConstant(BlendMask, MVT::i8));
6192 }
6193 }
6194 // FALLTHROUGH
6195 case MVT::v16i8:
6196 case MVT::v32i8: {
6197 // Scale the blend by the number of bytes per element.
6198 int Scale = VT.getScalarSizeInBits() / 8;
6200 // This form of blend is always done on bytes. Compute the byte vector
6201 // type.
6202 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
6204 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
6205 // mix of LLVM's code generator and the x86 backend. We tell the code
6206 // generator that boolean values in the elements of an x86 vector register
6207 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
6208 // mapping a select to operand #1, and 'false' mapping to operand #2. The
6209 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
6210 // of the element (the remaining are ignored) and 0 in that high bit would
6211 // mean operand #1 while 1 in the high bit would mean operand #2. So while
6212 // the LLVM model for boolean values in vector elements gets the relevant
6213 // bit set, it is set backwards and over constrained relative to x86's
6214 // actual model.
6215 SmallVector<SDValue, 32> VSELECTMask;
6216 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6217 for (int j = 0; j < Scale; ++j)
6218 VSELECTMask.push_back(
6219 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
6220 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
6222 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
6223 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
6225 ISD::BITCAST, DL, VT,
6226 DAG.getNode(ISD::VSELECT, DL, BlendVT,
6227 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
6228 V1, V2));
6229 }
6231 default:
6232 llvm_unreachable("Not a supported integer vector type!");
6233 }
6234 }
6236 /// \brief Try to lower as a blend of elements from two inputs followed by
6237 /// a single-input permutation.
6239 /// This matches the pattern where we can blend elements from two inputs and
6240 /// then reduce the shuffle to a single-input permutation.
6241 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
6242 SDValue V2,
6243 ArrayRef<int> Mask,
6244 SelectionDAG &DAG) {
6245 // We build up the blend mask while checking whether a blend is a viable way
6246 // to reduce the shuffle.
6247 SmallVector<int, 32> BlendMask(Mask.size(), -1);
6248 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
6250 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6251 if (Mask[i] < 0)
6252 continue;
6254 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
6256 if (BlendMask[Mask[i] % Size] == -1)
6257 BlendMask[Mask[i] % Size] = Mask[i];
6258 else if (BlendMask[Mask[i] % Size] != Mask[i])
6259 return SDValue(); // Can't blend in the needed input!
6261 PermuteMask[i] = Mask[i] % Size;
6264 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
6265 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
6268 /// \brief Generic routine to decompose a shuffle and blend into independent
6269 /// blends and permutes.
6271 /// This matches the extremely common pattern for handling combined
6272 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
6273 /// operations. It will try to pick the best arrangement of shuffles and
6274 /// blends.
6275 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
6276 SDValue V1,
6277 SDValue V2,
6278 ArrayRef<int> Mask,
6279 SelectionDAG &DAG) {
6280 // Shuffle the input elements into the desired positions in V1 and V2 and
6281 // blend them together.
6282 SmallVector<int, 32> V1Mask(Mask.size(), -1);
6283 SmallVector<int, 32> V2Mask(Mask.size(), -1);
6284 SmallVector<int, 32> BlendMask(Mask.size(), -1);
6285 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6286 if (Mask[i] >= 0 && Mask[i] < Size) {
6287 V1Mask[i] = Mask[i];
6288 BlendMask[i] = i;
6289 } else if (Mask[i] >= Size) {
6290 V2Mask[i] = Mask[i] - Size;
6291 BlendMask[i] = i + Size;
6294 // Try to lower with the simpler initial blend strategy unless one of the
6295 // input shuffles would be a no-op. We prefer to shuffle inputs as the
6296 // shuffle may be able to fold with a load or other benefit. However, when
6297 // we'll have to do 2x as many shuffles in order to achieve this, blending
6298 // first is a better strategy.
6299 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
6300 if (SDValue BlendPerm =
6301 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
6302 return BlendPerm;
6304 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
6305 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
6306 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
6309 /// \brief Try to lower a vector shuffle as a byte rotation.
6311 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
6312 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
6313 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
6314 /// try to generically lower a vector shuffle through such a pattern. It
6315 /// does not check for the profitability of lowering either as PALIGNR or
6316 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
6317 /// This matches shuffle vectors that look like:
6319 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
6321 /// Essentially it concatenates V1 and V2, shifts right by some number of
6322 /// elements, and takes the low elements as the result. Note that while this is
6323 /// specified as a *right shift* because x86 is little-endian, it is a *left
6324 /// rotate* of the vector lanes.
6325 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
6326 SDValue V2,
6327 ArrayRef<int> Mask,
6328 const X86Subtarget *Subtarget,
6329 SelectionDAG &DAG) {
6330 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
6332 int NumElts = Mask.size();
6333 int NumLanes = VT.getSizeInBits() / 128;
6334 int NumLaneElts = NumElts / NumLanes;
6336 // We need to detect various ways of spelling a rotation:
6337 // [11, 12, 13, 14, 15, 0, 1, 2]
6338 // [-1, 12, 13, 14, -1, -1, 1, -1]
6339 // [-1, -1, -1, -1, -1, -1, 1, 2]
6340 // [ 3, 4, 5, 6, 7, 8, 9, 10]
6341 // [-1, 4, 5, 6, -1, -1, 9, -1]
6342 // [-1, 4, 5, 6, -1, -1, -1, -1]
6343 int Rotation = 0;
6344 SDValue Lo, Hi;
6345 for (int l = 0; l < NumElts; l += NumLaneElts) {
6346 for (int i = 0; i < NumLaneElts; ++i) {
6347 if (Mask[l + i] == -1)
6348 continue;
6349 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
6351 // Get the mod-Size index and lane correct it.
6352 int LaneIdx = (Mask[l + i] % NumElts) - l;
6353 // Make sure it was in this lane.
6354 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
6355 return SDValue();
6357 // Determine where a rotated vector would have started.
6358 int StartIdx = i - LaneIdx;
6360 // The identity rotation isn't interesting, stop.
6361 if (StartIdx == 0)
6362 return SDValue();
6363 // If we found the tail of a vector the rotation must be the missing
6364 // front. If we found the head of a vector, it must be how much of the
6366 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
6368 if (Rotation == 0)
6369 Rotation = CandidateRotation;
6370 else if (Rotation != CandidateRotation)
6371 // The rotations don't match, so we can't match this mask.
6372 return SDValue();
6374 // Compute which value this mask is pointing at.
6375 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
6377 // Compute which of the two target values this index should be assigned
6378 // to. This reflects whether the high elements are remaining or the low
6379 // elements are remaining.
6380 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
6382 // Either set up this value if we've not encountered it before, or check
6383 // that it remains consistent.
6384 if (!TargetV)
6385 TargetV = MaskV;
6386 else if (TargetV != MaskV)
6387 // This may be a rotation, but it pulls from the inputs in some
6388 // unsupported interleaving.
6389 return SDValue();
6390 }
6391 }
6393 // Check that we successfully analyzed the mask, and normalize the results.
6394 assert(Rotation != 0 && "Failed to locate a viable rotation!");
6395 assert((Lo || Hi) && "Failed to find a rotated input vector!");
6397 if (!Lo)
6398 Lo = Hi;
6399 else if (!Hi)
6400 Hi = Lo;
6401 // The actual rotate instruction rotates bytes, so we need to scale the
6402 // rotation based on how many bytes are in the vector lane.
6403 int Scale = 16 / NumLaneElts;
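// For example, a v8i16 shuffle has 2 bytes per element, so an element
// rotation of 3 becomes a byte rotation (PALIGNR immediate) of 6.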
6405 // SSSE3 targets can use the palignr instruction.
6406 if (Subtarget->hasSSSE3()) {
6407 // Cast the inputs to i8 vector of correct length to match PALIGNR.
6408 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
6409 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
6410 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
6412 return DAG.getNode(ISD::BITCAST, DL, VT,
6413 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
6414 DAG.getConstant(Rotation * Scale, MVT::i8)));
6417 assert(VT.getSizeInBits() == 128 &&
6418 "Rotate-based lowering only supports 128-bit lowering!");
6419 assert(Mask.size() <= 16 &&
6420 "Can shuffle at most 16 bytes in a 128-bit vector!");
6422 // Default SSE2 implementation
6423 int LoByteShift = 16 - Rotation * Scale;
6424 int HiByteShift = Rotation * Scale;
6426 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
6427 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
6428 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
6430 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
6431 DAG.getConstant(LoByteShift, MVT::i8));
6432 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
6433 DAG.getConstant(HiByteShift, MVT::i8));
6434 return DAG.getNode(ISD::BITCAST, DL, VT,
6435 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
6438 /// \brief Compute whether each element of a shuffle is zeroable.
6440 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
6441 /// Either it is an undef element in the shuffle mask, the element of the input
6442 /// referenced is undef, or the element of the input referenced is known to be
6443 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
6444 /// as many lanes with this technique as possible to simplify the remaining
6445 /// shuffle.
6446 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
6447 SDValue V1, SDValue V2) {
6448 SmallBitVector Zeroable(Mask.size(), false);
6450 while (V1.getOpcode() == ISD::BITCAST)
6451 V1 = V1->getOperand(0);
6452 while (V2.getOpcode() == ISD::BITCAST)
6453 V2 = V2->getOperand(0);
6455 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
6456 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
6458 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6459 int M = Mask[i];
6460 // Handle the easy cases.
6461 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
6462 Zeroable[i] = true;
6463 continue;
6464 }
6466 // If this is an index into a build_vector node (which has the same number
6467 // of elements), dig out the input value and use it.
6468 SDValue V = M < Size ? V1 : V2;
6469 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
6470 continue;
6472 SDValue Input = V.getOperand(M % Size);
6473 // The UNDEF opcode check really should be dead code here, but not quite
6474 // worth asserting on (it isn't invalid, just unexpected).
6475 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
6476 Zeroable[i] = true;
6477 }
6479 return Zeroable;
6480 }
6482 /// \brief Try to emit a bitmask instruction for a shuffle.
6484 /// This handles cases where we can model a blend exactly as a bitmask due to
6485 /// one of the inputs being zeroable.
6486 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
6487 SDValue V2, ArrayRef<int> Mask,
6488 SelectionDAG &DAG) {
6489 MVT EltVT = VT.getScalarType();
6490 int NumEltBits = EltVT.getSizeInBits();
6491 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
6492 SDValue Zero = DAG.getConstant(0, IntEltVT);
6493 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
6494 if (EltVT.isFloatingPoint()) {
6495 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
6496 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
6498 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
6499 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
6500 SDValue V;
6501 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6502 if (Zeroable[i])
6503 continue;
6504 if (Mask[i] % Size != i)
6505 return SDValue(); // Not a blend.
6506 if (!V)
6507 V = Mask[i] < Size ? V1 : V2;
6508 else if (V != (Mask[i] < Size ? V1 : V2))
6509 return SDValue(); // Can only let one input through the mask.
6511 VMaskOps[i] = AllOnes;
6512 }
6513 if (!V)
6514 return SDValue(); // No non-zeroable elements!
6516 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
6517 V = DAG.getNode(VT.isFloatingPoint()
6518 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
6519 DL, VT, V, VMask);
6520 return V;
6521 }
6523 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
6525 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
6526 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
6527 /// matches elements from one of the input vectors shuffled to the left or
6528 /// right with zeroable elements 'shifted in'. It handles both the strictly
6529 /// bit-wise element shifts and the byte shift across an entire 128-bit double
6530 /// quad word lane.
6532 /// PSHL : (little-endian) left bit shift.
6533 /// [ zz, 0, zz, 2 ]
6534 /// [ -1, 4, zz, -1 ]
6535 /// PSRL : (little-endian) right bit shift.
6537 /// [ -1, -1, 7, zz]
6538 /// PSLLDQ : (little-endian) left byte shift
6539 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
6540 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
6541 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
6542 /// PSRLDQ : (little-endian) right byte shift
6543 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
6544 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
6545 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
6546 static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
6547 SDValue V2, ArrayRef<int> Mask,
6548 SelectionDAG &DAG) {
6549 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
6551 int Size = Mask.size();
6552 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
6554 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
6555 for (int i = 0; i < Size; i += Scale)
6556 for (int j = 0; j < Shift; ++j)
6557 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
6558 return false;
6560 return true;
6561 };
6563 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
6564 for (int i = 0; i != Size; i += Scale) {
6565 unsigned Pos = Left ? i + Shift : i;
6566 unsigned Low = Left ? i : i + Shift;
6567 unsigned Len = Scale - Shift;
6568 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
6569 Low + (V == V1 ? 0 : Size)))
6570 return SDValue();
6571 }
6573 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
6574 bool ByteShift = ShiftEltBits > 64;
6575 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
6576 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
6577 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
6579 // Normalize the scale for byte shifts to still produce an i64 element
6580 // type.
6581 Scale = ByteShift ? Scale / 2 : Scale;
6583 // We need to round trip through the appropriate type for the shift.
6584 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
6585 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
6586 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
6587 "Illegal integer vector type");
6588 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
6590 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
6591 return DAG.getNode(ISD::BITCAST, DL, VT, V);
6594 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
6595 // keep doubling the size of the integer elements up to that. We can
6596 // then shift the elements of the integer vector by whole multiples of
6597 // their width within the elements of the larger integer vector. Test each
6598 // multiple to see if we can find a match with the moved element indices
6599 // and that the shifted in elements are all zeroable.
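// For example, the v4i32 mask [ zz, 0, zz, 2 ] from the function comment
// above matches with Shift = 1, Scale = 2: the vector is treated as v2i64
// and each i64 element is shifted left by 32 bits.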
6600 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
6601 for (int Shift = 1; Shift != Scale; ++Shift)
6602 for (bool Left : {true, false})
6603 if (CheckZeros(Shift, Scale, Left))
6604 for (SDValue V : {V1, V2})
6605 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
6606 return Match;
6608 // No matching shift was found.
6609 return SDValue();
6610 }
6612 /// \brief Lower a vector shuffle as a zero or any extension.
6614 /// Given a specific number of elements, element bit width, and extension
6615 /// stride, produce either a zero or any extension based on the available
6616 /// features of the subtarget.
6617 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
6618 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
6619 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
6620 assert(Scale > 1 && "Need a scale to extend.");
6621 int NumElements = VT.getVectorNumElements();
6622 int EltBits = VT.getScalarSizeInBits();
6623 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
6624 "Only 8, 16, and 32 bit elements can be extended.");
6625 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
6627 // Found a valid zext mask! Try various lowering strategies based on the
6628 // input type and available ISA extensions.
6629 if (Subtarget->hasSSE41()) {
6630 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
6631 NumElements / Scale);
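// For example, zero-extending v8i16 with a scale of 2 emits X86ISD::VZEXT
// (PMOVZX) to v4i32.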
6632 return DAG.getNode(ISD::BITCAST, DL, VT,
6633 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
6636 // For any extends we can cheat for larger element sizes and use shuffle
6637 // instructions that can fold with a load and/or copy.
6638 if (AnyExt && EltBits == 32) {
6639 int PSHUFDMask[4] = {0, -1, 1, -1};
6641 ISD::BITCAST, DL, VT,
6642 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
6643 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
6644 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
6646 if (AnyExt && EltBits == 16 && Scale > 2) {
6647 int PSHUFDMask[4] = {0, -1, 0, -1};
6648 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
6649 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
6650 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
6651 int PSHUFHWMask[4] = {1, -1, -1, -1};
6653 ISD::BITCAST, DL, VT,
6654 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
6655 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
6656 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
6659 // If this would require more than 2 unpack instructions to expand, use
6660 // pshufb when available. We can only use more than 2 unpack instructions
6661 // when zero extending i8 elements which also makes it easier to use pshufb.
6662 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
6663 assert(NumElements == 16 && "Unexpected byte vector width!");
6664 SDValue PSHUFBMask[16];
6665 for (int i = 0; i < 16; ++i)
6666 PSHUFBMask[i] =
6667 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
6668 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
6669 return DAG.getNode(ISD::BITCAST, DL, VT,
6670 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
6671 DAG.getNode(ISD::BUILD_VECTOR, DL,
6672 MVT::v16i8, PSHUFBMask)));
6675 // Otherwise emit a sequence of unpacks.
6676 do {
6677 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
6678 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
6679 : getZeroVector(InputVT, Subtarget, DAG, DL);
6680 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
6681 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
6682 Scale /= 2;
6683 EltBits *= 2;
6684 NumElements /= 2;
6685 } while (Scale > 1);
6686 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
6689 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
6691 /// This routine will try to do everything in its power to cleverly lower
6692 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
6693 /// check for the profitability of this lowering, it tries to aggressively
6694 /// match this pattern. It will use all of the micro-architectural details it
6695 /// can to emit an efficient lowering. It handles both blends with all-zero
6696 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
6697 /// masking out later).
6699 /// The reason we have dedicated lowering for zext-style shuffles is that they
6700 /// are both incredibly common and often quite performance sensitive.
6701 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
6702 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
6703 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
6704 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
6706 int Bits = VT.getSizeInBits();
6707 int NumElements = VT.getVectorNumElements();
6708 assert(VT.getScalarSizeInBits() <= 32 &&
6709 "Exceeds 32-bit integer zero extension limit");
6710 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
6712 // Define a helper function to check a particular ext-scale and lower to it if
6713 // valid.
6714 auto Lower = [&](int Scale) -> SDValue {
6715 SDValue InputV;
6716 bool AnyExt = true;
6717 for (int i = 0; i < NumElements; ++i) {
6718 if (Mask[i] == -1)
6719 continue; // Valid anywhere but doesn't tell us anything.
6720 if (i % Scale != 0) {
6721 // Each of the extended elements needs to be zeroable.
6722 if (!Zeroable[i])
6723 return SDValue();
6725 // We no longer are in the anyext case.
6726 AnyExt = false;
6727 continue;
6728 }
6730 // Each of the base elements needs to be consecutive indices into the
6731 // same input vector.
6732 SDValue V = Mask[i] < NumElements ? V1 : V2;
6733 if (!InputV)
6734 InputV = V;
6735 else if (InputV != V)
6736 return SDValue(); // Flip-flopping inputs.
6738 if (Mask[i] % NumElements != i / Scale)
6739 return SDValue(); // Non-consecutive strided elements.
6742 // If we fail to find an input, we have a zero-shuffle which should always
6743 // have already been handled.
6744 // FIXME: Maybe handle this here in case during blending we end up with one?
6745 if (!InputV)
6746 return SDValue();
6748 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
6749 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
6752 // The widest scale possible for extending is to a 64-bit integer.
6753 assert(Bits % 64 == 0 &&
6754 "The number of bits in a vector must be divisible by 64 on x86!");
6755 int NumExtElements = Bits / 64;
6757 // Each iteration, try extending the elements half as much, but into twice as
6758 // many elements.
6759 for (; NumExtElements < NumElements; NumExtElements *= 2) {
6760 assert(NumElements % NumExtElements == 0 &&
6761 "The input vector size must be divisible by the extended size.");
6762 if (SDValue V = Lower(NumElements / NumExtElements))
6763 return V;
6764 }
6766 // General extends failed, but 128-bit vectors may be able to use MOVQ.
6767 if (Bits != 128)
6768 return SDValue();
6770 // Returns one of the source operands if the shuffle can be reduced to a
6771 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
6772 auto CanZExtLowHalf = [&]() {
6773 for (int i = NumElements / 2; i != NumElements; ++i)
6774 if (!Zeroable[i])
6775 return SDValue();
6776 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
6777 return V1;
6778 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
6779 return V2;
6780 return SDValue();
6781 };
6783 if (SDValue V = CanZExtLowHalf()) {
6784 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
6785 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
6786 return DAG.getNode(ISD::BITCAST, DL, VT, V);
6789 // No viable ext lowering found.
6790 return SDValue();
6791 }
6793 /// \brief Try to get a scalar value for a specific element of a vector.
6795 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
6796 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
6797 SelectionDAG &DAG) {
6798 MVT VT = V.getSimpleValueType();
6799 MVT EltVT = VT.getVectorElementType();
6800 while (V.getOpcode() == ISD::BITCAST)
6801 V = V.getOperand(0);
6802 // If the bitcasts shift the element size, we can't extract an equivalent
6803 // element from it.
6804 MVT NewVT = V.getSimpleValueType();
6805 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
6806 return SDValue();
6808 if (V.getOpcode() == ISD::BUILD_VECTOR ||
6809 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
6810 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
6812 return SDValue();
6813 }
6815 /// \brief Helper to test for a load that can be folded with x86 shuffles.
6817 /// This is particularly important because the set of instructions varies
6818 /// significantly based on whether the operand is a load or not.
6819 static bool isShuffleFoldableLoad(SDValue V) {
6820 while (V.getOpcode() == ISD::BITCAST)
6821 V = V.getOperand(0);
6823 return ISD::isNON_EXTLoad(V.getNode());
6826 /// \brief Try to lower insertion of a single element into a zero vector.
6828 /// This is a common pattern that we have especially efficient patterns to lower
6829 /// across all subtarget feature sets.
6830 static SDValue lowerVectorShuffleAsElementInsertion(
6831 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
6832 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
6833 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
6834 MVT ExtVT = VT;
6835 MVT EltVT = VT.getVectorElementType();
6837 int V2Index = std::find_if(Mask.begin(), Mask.end(),
6838 [&Mask](int M) { return M >= (int)Mask.size(); }) -
6839 Mask.begin();
6840 bool IsV1Zeroable = true;
6841 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6842 if (i != V2Index && !Zeroable[i]) {
6843 IsV1Zeroable = false;
6847 // Check for a single input from a SCALAR_TO_VECTOR node.
6848 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
6849 // all the smarts here sunk into that routine. However, the current
6850 // lowering of BUILD_VECTOR makes that nearly impossible until the old
6851 // vector shuffle lowering is dead.
6852 if (SDValue V2S = getScalarValueForVectorElement(
6853 V2, Mask[V2Index] - Mask.size(), DAG)) {
6854 // We need to zext the scalar if it is smaller than an i32.
6855 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
6856 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
6857 // Using zext to expand a narrow element won't work for non-zero
6858 // insertions.
6859 if (!IsV1Zeroable)
6860 return SDValue();
6862 // Zero-extend directly to i32.
6863 ExtVT = MVT::v4i32;
6864 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
6866 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
6867 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
6868 EltVT == MVT::i16) {
6869 // Either not inserting from the low element of the input or the input
6870 // element size is too small to use VZEXT_MOVL to clear the high bits.
6871 return SDValue();
6872 }
6874 if (!IsV1Zeroable) {
6875 // If V1 can't be treated as a zero vector we have fewer options to lower
6876 // this. We can't support integer vectors or non-zero targets cheaply, and
6877 // the V1 elements can't be permuted in any way.
6878 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
6879 if (!VT.isFloatingPoint() || V2Index != 0)
6881 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
6882 V1Mask[V2Index] = -1;
6883 if (!isNoopShuffleMask(V1Mask))
6885 // This is essentially a special case blend operation, but if we have
6886 // general purpose blend operations, they are always faster. Bail and let
6887 // the rest of the lowering handle these as blends.
6888 if (Subtarget->hasSSE41())
6891 // Otherwise, use MOVSD or MOVSS.
6892 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
6893 "Only two types of floating point element types to handle!");
6894 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
6895 ExtVT, V1, V2);
6896 }
6898 // This lowering only works for the low element with floating point vectors.
6899 if (VT.isFloatingPoint() && V2Index != 0)
6902 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
6904 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
6906 if (V2Index != 0) {
6907 // If we have 4 or fewer lanes we can cheaply shuffle the element into
6908 // the desired position. Otherwise it is more efficient to do a vector
6909 // shift left. We know that we can do a vector shift left because all
6910 // the inputs are zero.
6911 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
6912 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
6913 V2Shuffle[V2Index] = 0;
6914 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
6915 } else {
6916 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
6918 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
6920 V2Index * EltVT.getSizeInBits()/8,
6921 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
6922 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
6923 }
6924 }
6926 return V2;
6927 }
6928 /// \brief Try to lower broadcast of a single element.
6930 /// For convenience, this code also bundles all of the subtarget feature set
6931 /// filtering. While a little annoying to re-dispatch on type here, there isn't
6932 /// a convenient way to factor it out.
6933 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
6934 ArrayRef<int> Mask,
6935 const X86Subtarget *Subtarget,
6936 SelectionDAG &DAG) {
6937 if (!Subtarget->hasAVX())
6939 if (VT.isInteger() && !Subtarget->hasAVX2())
6942 // Check that the mask is a broadcast.
6943 int BroadcastIdx = -1;
6945 if (M >= 0 && BroadcastIdx == -1)
6947 else if (M >= 0 && M != BroadcastIdx)
6950 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
6951 "a sorted mask where the broadcast "
6952 "comes from V1.");
6954 // Go up the chain of (vector) values to try and find a scalar load that
6955 // we can combine with the broadcast.
6957 switch (V.getOpcode()) {
6958 case ISD::CONCAT_VECTORS: {
6959 int OperandSize = Mask.size() / V.getNumOperands();
6960 V = V.getOperand(BroadcastIdx / OperandSize);
6961 BroadcastIdx %= OperandSize;
6965 case ISD::INSERT_SUBVECTOR: {
6966 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
6967 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
6971 int BeginIdx = (int)ConstantIdx->getZExtValue();
6973 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
6974 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
6975 BroadcastIdx -= BeginIdx;
6986 // Check if this is a broadcast of a scalar. We special case lowering
6987 // for scalars so that we can more effectively fold with loads.
6988 if (V.getOpcode() == ISD::BUILD_VECTOR ||
6989 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
6990 V = V.getOperand(BroadcastIdx);
6992 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
6994 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
6996 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
6997 // We can't broadcast from a vector register w/o AVX2, and we can only
6998 // broadcast from the zero-element of a vector register.
7002 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
7005 // Check for whether we can use INSERTPS to perform the shuffle. We only use
7006 // INSERTPS when the V1 elements are already in the correct locations
7007 // because otherwise we can just always use two SHUFPS instructions which
7008 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
7009 // perform INSERTPS if a single V1 element is out of place and all V2
7010 // elements are zeroable.
7011 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
7012 ArrayRef<int> Mask,
7013 SelectionDAG &DAG) {
7014 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
7015 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
7016 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
7017 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
7019 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7021 unsigned ZMask = 0;
7022 int V1DstIndex = -1;
7023 int V2DstIndex = -1;
7024 bool V1UsedInPlace = false;
7026 for (int i = 0; i < 4; ++i) {
7027 // Synthesize a zero mask from the zeroable elements (includes undefs).
7033 // Flag if we use any V1 inputs in place.
7035 V1UsedInPlace = true;
7039 // We can only insert a single non-zeroable element.
7040 if (V1DstIndex != -1 || V2DstIndex != -1)
7044 // V1 input out of place for insertion.
7047 // V2 input for insertion.
7052 // Don't bother if we have no (non-zeroable) element for insertion.
7053 if (V1DstIndex == -1 && V2DstIndex == -1)
7056 // Determine element insertion src/dst indices. The src index is from the
7057 // start of the inserted vector, not the start of the concatenated vector.
7058 unsigned V2SrcIndex = 0;
7059 if (V1DstIndex != -1) {
7060 // If we have a V1 input out of place, we use V1 as the V2 element insertion
7061 // and don't use the original V2 at all.
7062 V2SrcIndex = Mask[V1DstIndex];
7063 V2DstIndex = V1DstIndex;
7064 V2 = V1;
7065 } else {
7066 V2SrcIndex = Mask[V2DstIndex] - 4;
7067 }
7069 // If no V1 inputs are used in place, then the result is created only from
7070 // the zero mask and the V2 insertion - so remove V1 dependency.
7072 V1 = DAG.getUNDEF(MVT::v4f32);
7074 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
7075 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7077 // Insert the V2 element into the desired position.
7079 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7080 DAG.getConstant(InsertPSMask, MVT::i8));
7083 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
7084 /// UNPCK instruction.
7086 /// This specifically targets cases where we end up with alternating between
7087 /// the two inputs, and so can permute them into something that feeds a single
7088 /// UNPCK instruction. Note that this routine only targets integer vectors
7089 /// because for floating point vectors we have a generalized SHUFPS lowering
7090 /// strategy that handles everything that doesn't *exactly* match an unpack,
7091 /// making this clever lowering unnecessary.
7092 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
7093 SDValue V2, ArrayRef<int> Mask,
7094 SelectionDAG &DAG) {
7095 assert(!VT.isFloatingPoint() &&
7096 "This routine only supports integer vectors.");
7097 assert(!isSingleInputShuffleMask(Mask) &&
7098 "This routine should only be used when blending two inputs.");
7099 assert(Mask.size() >= 2 && "Single element masks are invalid.");
7101 int Size = Mask.size();
7103 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
7104 return M >= 0 && M % Size < Size / 2;
7106 int NumHiInputs = std::count_if(
7107 Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
7109 bool UnpackLo = NumLoInputs >= NumHiInputs;
7111 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
7112 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7113 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7115 for (int i = 0; i < Size; ++i) {
7119 // Each element of the unpack contains Scale elements from this mask.
7120 int UnpackIdx = i / Scale;
7122 // We only handle the case where V1 feeds the first slots of the unpack.
7123 // We rely on canonicalization to ensure this is the case.
7124 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
7127 // Setup the mask for this input. The indexing is tricky as we have to
7128 // handle the unpack stride.
7129 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
7130 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
7134 // If we will have to shuffle both inputs to use the unpack, check whether
7135 // we can just unpack first and shuffle the result. If so, skip this unpack.
7136 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
7137 !isNoopShuffleMask(V2Mask))
7140 // Shuffle the inputs into place.
7141 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7142 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7144 // Cast the inputs to the type we will use to unpack them.
7145 V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
7146 V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
7148 // Unpack the inputs and cast the result back to the desired type.
7149 return DAG.getNode(ISD::BITCAST, DL, VT,
7150 DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
7151 DL, UnpackVT, V1, V2));
7154 // We try each unpack from the largest to the smallest to try and find one
7155 // that fits this mask.
7156 int OrigNumElements = VT.getVectorNumElements();
7157 int OrigScalarSize = VT.getScalarSizeInBits();
7158 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
7159 int Scale = ScalarSize / OrigScalarSize;
7160 int NumElements = OrigNumElements / Scale;
7161 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
7162 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
7166 // If none of the unpack-rooted lowerings worked (or were profitable) try an
7167 // initial unpack.
7168 if (NumLoInputs == 0 || NumHiInputs == 0) {
7169 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
7170 "We have to have *some* inputs!");
7171 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
7173 // FIXME: We could consider the total complexity of the permute of each
7174 // possible unpacking. Or at the least we should consider how many
7175 // half-crossings are created.
7176 // FIXME: We could consider commuting the unpacks.
7178 SmallVector<int, 32> PermMask;
7179 PermMask.assign(Size, -1);
7180 for (int i = 0; i < Size; ++i) {
7184 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
7187 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
7189 return DAG.getVectorShuffle(
7190 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
7192 DAG.getUNDEF(VT), PermMask);
7198 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
7200 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
7201 /// support for floating point shuffles but not integer shuffles. These
7202 /// instructions will incur a domain crossing penalty on some chips though so
7203 /// it is better to avoid lowering through this for integer vectors where
7204 /// possible.
7205 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
7206 const X86Subtarget *Subtarget,
7207 SelectionDAG &DAG) {
7208 SDLoc DL(Op);
7209 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
7210 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
7211 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
7212 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
7213 ArrayRef<int> Mask = SVOp->getMask();
7214 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
7216 if (isSingleInputShuffleMask(Mask)) {
7217 // Use low duplicate instructions for masks that match their pattern.
7218 if (Subtarget->hasSSE3())
7219 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
7220 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
7222 // Straight shuffle of a single input vector. Simulate this by using the
7223 // single input as both of the "inputs" to this instruction..
7224 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
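// For example, the splat mask <1, 1> encodes as 0b11, duplicating the high
// double into both result elements.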
7226 if (Subtarget->hasAVX()) {
7227 // If we have AVX, we can use VPERMILPS which will allow folding a load
7228 // into the shuffle.
7229 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
7230 DAG.getConstant(SHUFPDMask, MVT::i8));
7233 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
7234 DAG.getConstant(SHUFPDMask, MVT::i8));
7236 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
7237 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
7239 // If we have a single input, insert that into V1 if we can do so cheaply.
7240 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
7241 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
7242 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
7244 // Try inverting the insertion since for v2 masks it is easy to do and we
7245 // can't reliably sort the mask one way or the other.
7246 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
7247 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
7248 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
7249 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
7253 // Try to use one of the special instruction patterns to handle two common
7254 // blend patterns if a zero-blend above didn't work.
7255 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
7256 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
7257 // We can either use a special instruction to load over the low double or
7258 // to move just the low double.
7260 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
7262 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
7264 if (Subtarget->hasSSE41())
7265 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
7269 // Use dedicated unpack instructions for masks that match their pattern.
7270 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
7271 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
7272 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
7273 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
7275 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
7276 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
7277 DAG.getConstant(SHUFPDMask, MVT::i8));
7280 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
7282 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
7283 /// the integer unit to minimize domain crossing penalties. However, for blends
7284 /// it falls back to the floating point shuffle operation with appropriate bit
7285 /// casting.
7286 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
7287 const X86Subtarget *Subtarget,
7288 SelectionDAG &DAG) {
7289 SDLoc DL(Op);
7290 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
7291 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
7292 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
7293 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
7294 ArrayRef<int> Mask = SVOp->getMask();
7295 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
7297 if (isSingleInputShuffleMask(Mask)) {
7298 // Check for being able to broadcast a single element.
7299 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
7300 Mask, Subtarget, DAG))
7303 // Straight shuffle of a single input vector. For everything from SSE2
7304 // onward this has a single fast instruction with no scary immediates.
7305 // We have to map the mask as it is actually a v4i32 shuffle instruction.
7306 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
7307 int WidenedMask[4] = {
7308 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
7309 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
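// For example, the v2i64 mask <1, 0> widens to the v4i32 PSHUFD mask
// <2, 3, 0, 1>.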
7311 ISD::BITCAST, DL, MVT::v2i64,
7312 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
7313 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
7315 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
7316 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
7317 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
7318 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
7320 // If we have a blend of two PACKUS operations and the blend aligns with the
7321 // low and high halves, we can just merge the PACKUS operations. This is
7322 // particularly important as it lets us merge shuffles that this routine itself
7323 // creates.
7324 auto GetPackNode = [](SDValue V) {
7325 while (V.getOpcode() == ISD::BITCAST)
7326 V = V.getOperand(0);
7328 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
7330 if (SDValue V1Pack = GetPackNode(V1))
7331 if (SDValue V2Pack = GetPackNode(V2))
7332 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
7333 DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
7334 Mask[0] == 0 ? V1Pack.getOperand(0)
7335 : V1Pack.getOperand(1),
7336 Mask[1] == 2 ? V2Pack.getOperand(0)
7337 : V2Pack.getOperand(1)));
7339 // Try to use shift instructions.
7341 lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
7344 // When loading a scalar and then shuffling it into a vector we can often do
7345 // the insertion cheaply.
7346 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
7347 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
7349 // Try inverting the insertion since for v2 masks it is easy to do and we
7350 // can't reliably sort the mask one way or the other.
7351 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
7352 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
7353 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
7356 // We have different paths for blend lowering, but they all must use the
7357 // *exact* same predicate.
7358 bool IsBlendSupported = Subtarget->hasSSE41();
7359 if (IsBlendSupported)
7360 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
7364 // Use dedicated unpack instructions for masks that match their pattern.
7365 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
7366 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
7367 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
7368 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
7370 // Try to use byte rotation instructions.
7371 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
7372 if (Subtarget->hasSSSE3())
7373 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
7374 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
7377 // If we have direct support for blends, we should lower by decomposing into
7378 // a permute. That will be faster than the domain cross.
7379 if (IsBlendSupported)
7380 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
7383 // We implement this with SHUFPD which is pretty lame because it will likely
7384 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
7385 // However, all the alternatives are still more cycles and newer chips don't
7386 // have this problem. It would be really nice if x86 had better shuffles here.
7387 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
7388 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
7389 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
7390 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
7393 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
7395 /// This is used to disable more specialized lowerings when the shufps lowering
7396 /// will happen to be efficient.
7397 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
7398 // This routine only handles 128-bit shufps.
7399 assert(Mask.size() == 4 && "Unsupported mask size!");
7401 // To lower with a single SHUFPS we need to have the low half and high half
7402 // each requiring a single input.
7403 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
7405 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
7406 return false;
7408 return true;
7409 }
7411 /// \brief Lower a vector shuffle using the SHUFPS instruction.
7413 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
7414 /// It makes no assumptions about whether this is the *best* lowering, it simply
7415 /// uses it.
7416 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
7417 ArrayRef<int> Mask, SDValue V1,
7418 SDValue V2, SelectionDAG &DAG) {
7419 SDValue LowV = V1, HighV = V2;
7420 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
7422 int NumV2Elements =
7423 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
7425 if (NumV2Elements == 1) {
7426 int V2Index =
7427 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
7428 Mask.begin();
7430 // Compute the index adjacent to V2Index and in the same half by toggling
7431 // the low bit.
7432 int V2AdjIndex = V2Index ^ 1;
7434 if (Mask[V2AdjIndex] == -1) {
7435 // Handles all the cases where we have a single V2 element and an undef.
7436 // This will only ever happen in the high lanes because we commute the
7437 // vector otherwise.
7438 if (V2Index < 2)
7439 std::swap(LowV, HighV);
7440 NewMask[V2Index] -= 4;
7442 // Handle the case where the V2 element ends up adjacent to a V1 element.
7443 // To make this work, blend them together as the first step.
7444 int V1Index = V2AdjIndex;
7445 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
7446 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
7447 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
7449 // Now proceed to reconstruct the final blend as we have the necessary
7450 // high or low half formed.
7451 if (V2Index < 2) {
7452 LowV = V2;
7453 HighV = V1;
7454 } else {
7455 HighV = V2;
7456 }
7457 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
7458 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
7460 } else if (NumV2Elements == 2) {
7461 if (Mask[0] < 4 && Mask[1] < 4) {
7462 // Handle the easy case where we have V1 in the low lanes and V2 in the
7463 // high lanes.
7464 NewMask[2] -= 4;
7465 NewMask[3] -= 4;
7466 } else if (Mask[2] < 4 && Mask[3] < 4) {
7467 // We also handle the reversed case because this utility may get called
7468 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
7469 // arrange things in the right direction.
7470 NewMask[0] -= 4;
7471 NewMask[1] -= 4;
7472 HighV = V1;
7473 LowV = V2;
7474 } else {
7475 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
7476 // trying to place elements directly, just blend them and set up the final
7477 // shuffle to place them.
7479 // The first two blend mask elements are for V1, the second two are for
7480 // V2.
7481 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
7482 Mask[2] < 4 ? Mask[2] : Mask[3],
7483 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
7484 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
7485 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
7486 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
7488 // Now we do a normal shuffle of V1 by giving V1 as both operands to
7489 // the blend.
7491 NewMask[0] = Mask[0] < 4 ? 0 : 2;
7492 NewMask[1] = Mask[0] < 4 ? 2 : 0;
7493 NewMask[2] = Mask[2] < 4 ? 1 : 3;
7494 NewMask[3] = Mask[2] < 4 ? 3 : 1;
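// For example, for the mask <0, 4, 2, 6> the blend produces
// <V1[0], V1[2], V2[0], V2[2]> and NewMask becomes <0, 2, 1, 3>.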
7497 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
7498 getV4X86ShuffleImm8ForMask(NewMask, DAG));
7501 /// \brief Lower 4-lane 32-bit floating point shuffles.
7503 /// Uses instructions exclusively from the floating point unit to minimize
7504 /// domain crossing penalties, as these are sufficient to implement all v4f32
7505 /// shuffles.
7506 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
7507 const X86Subtarget *Subtarget,
7508 SelectionDAG &DAG) {
7509 SDLoc DL(Op);
7510 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
7511 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
7512 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
7513 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
7514 ArrayRef<int> Mask = SVOp->getMask();
7515 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
7517 int NumV2Elements =
7518 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
7520 if (NumV2Elements == 0) {
7521 // Check for being able to broadcast a single element.
7522 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
7523 Mask, Subtarget, DAG))
7526 // Use even/odd duplicate instructions for masks that match their pattern.
7527 if (Subtarget->hasSSE3()) {
7528 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
7529 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
7530 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
7531 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
7534 if (Subtarget->hasAVX()) {
7535 // If we have AVX, we can use VPERMILPS which will allow folding a load
7536 // into the shuffle.
7537 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
7538 getV4X86ShuffleImm8ForMask(Mask, DAG));
7541 // Otherwise, use a straight shuffle of a single input vector. We pass the
7542 // input vector to both operands to simulate this with a SHUFPS.
7543 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
7544 getV4X86ShuffleImm8ForMask(Mask, DAG));
7547 // There are special ways we can lower some single-element blends. However, we
7548 // have custom ways we can lower more complex single-element blends below that
7549 // we defer to if both this and BLENDPS fail to match, so restrict this to
7550 // when the V2 input is targeting element 0 of the mask -- that is the fast
7551 // case here.
7552 if (NumV2Elements == 1 && Mask[0] >= 4)
7553 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
7554 Mask, Subtarget, DAG))
7557 if (Subtarget->hasSSE41()) {
7558 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
7562 // Use INSERTPS if we can complete the shuffle efficiently.
7563 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
7566 if (!isSingleSHUFPSMask(Mask))
7567 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
7568 DL, MVT::v4f32, V1, V2, Mask, DAG))
7572 // Use dedicated unpack instructions for masks that match their pattern.
7573 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
7574 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
7575 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
7576 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
7577 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
7578 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
7579 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
7580 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);
7582 // Otherwise fall back to a SHUFPS lowering strategy.
7583 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
7586 /// \brief Lower 4-lane i32 vector shuffles.
7588 /// We try to handle these with integer-domain shuffles where we can, but for
7589 /// blends we use the floating point domain blend instructions.
7590 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
7591 const X86Subtarget *Subtarget,
7592 SelectionDAG &DAG) {
7593 SDLoc DL(Op);
7594 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
7595 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
7596 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
7597 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
7598 ArrayRef<int> Mask = SVOp->getMask();
7599 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
7601 // Whenever we can lower this as a zext, that instruction is strictly faster
7602 // than any alternative. It also allows us to fold memory operands into the
7603 // shuffle in many cases.
7604 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
7605 Mask, Subtarget, DAG))
7608 int NumV2Elements =
7609 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
7611 if (NumV2Elements == 0) {
7612 // Check for being able to broadcast a single element.
7613 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
7614 Mask, Subtarget, DAG))
7617 // Straight shuffle of a single input vector. For everything from SSE2
7618 // onward this has a single fast instruction with no scary immediates.
7619 // We coerce the shuffle pattern to be compatible with UNPCK instructions
7620 // but we aren't actually going to use the UNPCK instruction because doing
7621 // so prevents folding a load into this instruction or making a copy.
7622 const int UnpackLoMask[] = {0, 0, 1, 1};
7623 const int UnpackHiMask[] = {2, 2, 3, 3};
7624 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
7625 Mask = UnpackLoMask;
7626 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
7627 Mask = UnpackHiMask;
7629 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
7630 getV4X86ShuffleImm8ForMask(Mask, DAG));
7633 // Try to use shift instructions.
7635 lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
7638 // There are special ways we can lower some single-element blends.
7639 if (NumV2Elements == 1)
7640 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
7641 Mask, Subtarget, DAG))
7644 // We have different paths for blend lowering, but they all must use the
7645 // *exact* same predicate.
7646 bool IsBlendSupported = Subtarget->hasSSE41();
7647 if (IsBlendSupported)
7648 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
7652 if (SDValue Masked =
7653 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
7656 // Use dedicated unpack instructions for masks that match their pattern.
7657 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
7658 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
7659 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
7660 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
7661 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
7662 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
7663 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
7664 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);
7666 // Try to use byte rotation instructions.
7667 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
7668 if (Subtarget->hasSSSE3())
7669 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
7670 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
7673 // If we have direct support for blends, we should lower by decomposing into
7674 // a permute. That will be faster than the domain cross.
7675 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                      Mask, DAG);
7679 // Try to lower by permuting the inputs into an unpack instruction.
7680 if (SDValue Unpack =
          lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
    return Unpack;
7684 // We implement this with SHUFPS because it can blend from two vectors.
7685 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // needed.
  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
                     DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
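// Illustrative note (not used by the lowering above; getV4X86ShuffleImm8ForMask
// already materializes this value as a DAG constant): a fully defined 4-lane
// mask packs into the 8-bit immediate consumed by PSHUFD/SHUFPS with two bits
// per destination lane, roughly:
//
//   unsigned Imm = 0;
//   for (int i = 0; i != 4; ++i)
//     Imm |= (Mask[i] & 0x3) << (i * 2); // Two bits select each source lane.
//
// For example, the UnpackLoMask {0, 0, 1, 1} above encodes as 0x50.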
7696 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
7697 /// shuffle lowering, and the most complex part.
7699 /// The lowering strategy is to try to form pairs of input lanes which are
7700 /// targeted at the same half of the final vector, and then use a dword shuffle
7701 /// to place them onto the right half, and finally unpack the paired lanes into
7702 /// their final position.
7704 /// The exact breakdown of how to form these dword pairs and align them on the
7705 /// correct sides is really tricky. See the comments within the function for
7706 /// more of the details.
7707 static SDValue lowerV8I16SingleInputVectorShuffle(
7708 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
7709 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7710 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
7711 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
7712 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
7714 SmallVector<int, 4> LoInputs;
7715 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
7716 [](int M) { return M >= 0; });
7717 std::sort(LoInputs.begin(), LoInputs.end());
7718 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
7719 SmallVector<int, 4> HiInputs;
7720 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
7721 [](int M) { return M >= 0; });
7722 std::sort(HiInputs.begin(), HiInputs.end());
7723 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
7726 int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
7729 int NumHToH = HiInputs.size() - NumLToH;
7730 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
7731 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
7732 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
7733 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
7735 // Check for being able to broadcast a single element.
7736 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
7737 Mask, Subtarget, DAG))
  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;
7745 // Use dedicated unpack instructions for masks that match their pattern.
7746 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
7747 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
7748 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
7749 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
7751 // Try to use byte rotation instructions.
7752 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
7753 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
7756 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
7757 // such inputs we can swap two of the dwords across the half mark and end up
7758 // with <=2 inputs to each half in each half. Once there, we can fall through
7759 // to the generic code below. For example:
7761 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
7762 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
7764 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
7765 // and an existing 2-into-2 on the other half. In this case we may have to
7766 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
7767 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
7768 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
7769 // because any other situation (including a 3-into-1 or 1-into-3 in the other
7770 // half than the one we target for fixing) will be fixed when we re-enter this
  // path. We will also combine any resulting sequence of PSHUFD instructions
  // into a single instruction. Here is an example of the tricky case:
7774 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
7775 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
7777 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
7779 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
7780 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
7782 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
7783 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
7785 // The result is fine to be handled by the generic logic.
7786 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
7787 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
7788 int AOffset, int BOffset) {
7789 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
7790 "Must call this with A having 3 or 1 inputs from the A half.");
7791 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
7792 "Must call this with B having 1 or 3 inputs from the B half.");
7793 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
7794 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
7796 // Compute the index of dword with only one word among the three inputs in
7797 // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord, BDWord;
7801 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
7802 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
7803 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
7804 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
7805 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
7806 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
7807 int TripleNonInputIdx =
7808 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
7809 TripleDWord = TripleNonInputIdx / 2;
7811 // We use xor with one to compute the adjacent DWord to whichever one the
7813 OneInputDWord = (OneInput / 2) ^ 1;
7815 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
7816 // and BToA inputs. If there is also such a problem with the BToB and AToB
7817 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
7818 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
7819 // is essential that we don't *create* a 3<-1 as then we might oscillate.
7820 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
7821 // Compute how many inputs will be flipped by swapping these DWords. We
7823 // to balance this to ensure we don't form a 3-1 shuffle in the other
7825 int NumFlippedAToBInputs =
7826 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
7827 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
7828 int NumFlippedBToBInputs =
7829 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
7830 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
7831 if ((NumFlippedAToBInputs == 1 &&
7832 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
7833 (NumFlippedBToBInputs == 1 &&
7834 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
7835 // We choose whether to fix the A half or B half based on whether that
7836 // half has zero flipped inputs. At zero, we may not be able to fix it
7837 // with that half. We also bias towards fixing the B half because that
7838 // will more commonly be the high half, and we have to bias one way.
7839 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
7840 ArrayRef<int> Inputs) {
7841 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
7842 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
7843 PinnedIdx ^ 1) != Inputs.end();
7844 // Determine whether the free index is in the flipped dword or the
7845 // unflipped dword based on where the pinned index is. We use this bit
7846 // in an xor to conditionally select the adjacent dword.
7847 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
7848 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
7849 FixFreeIdx) != Inputs.end();
7850 if (IsFixIdxInput == IsFixFreeIdxInput)
7852 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
7853 FixFreeIdx) != Inputs.end();
7854 assert(IsFixIdxInput != IsFixFreeIdxInput &&
7855 "We need to be changing the number of flipped inputs!");
7856 int PSHUFHalfMask[] = {0, 1, 2, 3};
7857 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
7858 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
7860 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
7863 if (M != -1 && M == FixIdx)
7865 else if (M != -1 && M == FixFreeIdx)
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx =
              AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }
7881 int PSHUFDMask[] = {0, 1, 2, 3};
7882 PSHUFDMask[ADWord] = BDWord;
7883 PSHUFDMask[BDWord] = ADWord;
7884 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
7885 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7886 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
7887 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M/2 == ADWord)
7892 M = 2 * BDWord + M % 2;
7893 else if (M != -1 && M/2 == BDWord)
7894 M = 2 * ADWord + M % 2;
7896 // Recurse back into this routine to re-compute state now that this isn't
7897 // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
7901 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
7902 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
7903 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
7904 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
7906 // At this point there are at most two inputs to the low and high halves from
7907 // each half. That means the inputs can always be grouped into dwords and
7908 // those dwords can then be moved to the correct half with a dword shuffle.
7909 // We use at most one low and one high word shuffle to collect these paired
7910 // inputs into dwords, and finally a dword shuffle to place them.
7911 int PSHUFLMask[4] = {-1, -1, -1, -1};
7912 int PSHUFHMask[4] = {-1, -1, -1, -1};
7913 int PSHUFDMask[4] = {-1, -1, -1, -1};
7915 // First fix the masks for all the inputs that are staying in their
7916 // original halves. This will then dictate the targets of the cross-half
7918 auto fixInPlaceInputs =
7919 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
7920 MutableArrayRef<int> SourceHalfMask,
7921 MutableArrayRef<int> HalfMask, int HalfOffset) {
7922 if (InPlaceInputs.empty())
7924 if (InPlaceInputs.size() == 1) {
7925 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
7926 InPlaceInputs[0] - HalfOffset;
7927 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
7930 if (IncomingInputs.empty()) {
7931 // Just fix all of the in place inputs.
7932 for (int Input : InPlaceInputs) {
7933 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
7934 PSHUFDMask[Input / 2] = Input / 2;
7939 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
7940 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
7941 InPlaceInputs[0] - HalfOffset;
7942 // Put the second input next to the first so that they are packed into
7943 // a dword. We find the adjacent index by toggling the low bit.
7944 int AdjIndex = InPlaceInputs[0] ^ 1;
7945 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
7946 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
7947 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
7949 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
7950 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
7952 // Now gather the cross-half inputs and place them into a free dword of
7953 // their target half.
7954 // FIXME: This operation could almost certainly be simplified dramatically to
7955 // look more like the 3-1 fixing operation.
7956 auto moveInputsToRightHalf = [&PSHUFDMask](
7957 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
7958 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
7959 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
7961 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
7962 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
7964 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
7966 int LowWord = Word & ~1;
7967 int HighWord = Word | 1;
7968 return isWordClobbered(SourceHalfMask, LowWord) ||
7969 isWordClobbered(SourceHalfMask, HighWord);
7972 if (IncomingInputs.empty())
7975 if (ExistingInputs.empty()) {
7976 // Map any dwords with inputs from them into the right half.
7977 for (int Input : IncomingInputs) {
7978 // If the source half mask maps over the inputs, turn those into
7979 // swaps and use the swapped lane.
7980 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
7981 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
7982 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
7983 Input - SourceOffset;
7984 // We have to swap the uses in our half mask in one sweep.
7985 for (int &M : HalfMask)
7986 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
7988 else if (M == Input)
7989 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
7991 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
7992 Input - SourceOffset &&
7993 "Previous placement doesn't match!");
7995 // Note that this correctly re-maps both when we do a swap and when
7996 // we observe the other side of the swap above. We rely on that to
7997 // avoid swapping the members of the input list directly.
7998 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
8001 // Map the input's dword into the correct half.
8002 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
8003 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
8005 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
8007 "Previous placement doesn't match!");
8010 // And just directly shift any other-half mask elements to be same-half
8011 // as we will have mirrored the dword containing the element into the
8012 // same position within that half.
8013 for (int &M : HalfMask)
8014 if (M >= SourceOffset && M < SourceOffset + 4) {
8015 M = M - SourceOffset + DestOffset;
8016 assert(M >= 0 && "This should never wrap below zero!");
8021 // Ensure we have the input in a viable dword of its current half. This
8022 // is particularly tricky because the original position may be clobbered
8023 // by inputs being moved and *staying* in that half.
8024 if (IncomingInputs.size() == 1) {
8025 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
8026 int InputFixed = std::find(std::begin(SourceHalfMask),
8027 std::end(SourceHalfMask), -1) -
8028 std::begin(SourceHalfMask) + SourceOffset;
8029 SourceHalfMask[InputFixed - SourceOffset] =
8030 IncomingInputs[0] - SourceOffset;
8031 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
8033 IncomingInputs[0] = InputFixed;
8035 } else if (IncomingInputs.size() == 2) {
8036 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
8037 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
8038 // We have two non-adjacent or clobbered inputs we need to extract from
8039 // the source half. To do this, we need to map them into some adjacent
8040 // dword slot in the source mask.
8041 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
8042 IncomingInputs[1] - SourceOffset};
8044 // If there is a free slot in the source half mask adjacent to one of
8045 // the inputs, place the other input in it. We use (Index XOR 1) to
8046 // compute an adjacent index.
8047 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
8048 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
8049 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
8050 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
8051 InputsFixed[1] = InputsFixed[0] ^ 1;
8052 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
8053 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
8054 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
8055 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
8056 InputsFixed[0] = InputsFixed[1] ^ 1;
8057 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
8058 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
8059 // The two inputs are in the same DWord but it is clobbered and the
8060 // adjacent DWord isn't used at all. Move both inputs to the free
8062 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
8063 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
8064 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
8065 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
8067 // The only way we hit this point is if there is no clobbering
8068 // (because there are no off-half inputs to this half) and there is no
8069 // free slot adjacent to one of the inputs. In this case, we have to
8070 // swap an input with a non-input.
8071 for (int i = 0; i < 4; ++i)
8072 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
8073 "We can't handle any clobbers here!");
8074 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
8075 "Cannot have adjacent inputs here!");
8077 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
8078 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
8080 // We also have to update the final source mask in this case because
8081 // it may need to undo the above swap.
8082 for (int &M : FinalSourceHalfMask)
8083 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
8084 M = InputsFixed[1] + SourceOffset;
8085 else if (M == InputsFixed[1] + SourceOffset)
8086 M = (InputsFixed[0] ^ 1) + SourceOffset;
8088 InputsFixed[1] = InputsFixed[0] ^ 1;
8091 // Point everything at the fixed inputs.
8092 for (int &M : HalfMask)
8093 if (M == IncomingInputs[0])
8094 M = InputsFixed[0] + SourceOffset;
8095 else if (M == IncomingInputs[1])
8096 M = InputsFixed[1] + SourceOffset;
8098 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
8099 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
8102 llvm_unreachable("Unhandled input size!");
8105 // Now hoist the DWord down to the right half.
8106 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
8107 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
8108 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
8109 for (int &M : HalfMask)
8110 for (int Input : IncomingInputs)
8112 M = FreeDWord * 2 + Input % 2;
8114 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
8115 /*SourceOffset*/ 4, /*DestOffset*/ 0);
8116 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
8117 /*SourceOffset*/ 0, /*DestOffset*/ 4);
8119 // Now enact all the shuffles we've computed to move the inputs into their
8121 if (!isNoopShuffleMask(PSHUFLMask))
8122 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
8123 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
8124 if (!isNoopShuffleMask(PSHUFHMask))
8125 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
8126 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
8127 if (!isNoopShuffleMask(PSHUFDMask))
8128 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8129 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8130 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
8131 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8133 // At this point, each half should contain all its inputs, and we can then
8134 // just shuffle them into their final position.
8135 assert(std::count_if(LoMask.begin(), LoMask.end(),
8136 [](int M) { return M >= 4; }) == 0 &&
8137 "Failed to lift all the high half inputs to the low mask!");
8138 assert(std::count_if(HiMask.begin(), HiMask.end(),
8139 [](int M) { return M >= 0 && M < 4; }) == 0 &&
8140 "Failed to lift all the low half inputs to the high mask!");
8142 // Do a half shuffle for the low mask.
8143 if (!isNoopShuffleMask(LoMask))
8144 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
8145 getV4X86ShuffleImm8ForMask(LoMask, DAG));
8147 // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
8151 if (!isNoopShuffleMask(HiMask))
8152 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DAG));

  return V;
}
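// A concrete walk-through (illustrative only): the single-input mask
// <4, 0, 5, 1, 6, 2, 7, 3> pairs one low-half and one high-half word in every
// dword, so the code above groups the words into dwords with PSHUFLW/PSHUFHW,
// moves those dwords into their target halves with a single PSHUFD, and
// finishes with one more in-half shuffle per half. The worst case produced by
// this routine is therefore PSHUFLW + PSHUFHW + PSHUFD + PSHUFLW + PSHUFHW.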
8158 /// \brief Helper to form a PSHUFB-based shuffle+blend.
8159 static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
8160 SDValue V2, ArrayRef<int> Mask,
                                          SelectionDAG &DAG, bool &V1InUse,
                                          bool &V2InUse) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  SDValue V1Mask[16];
  SDValue V2Mask[16];
  V1InUse = false;
  V2InUse = false;
8169 int Size = Mask.size();
8170 int Scale = 16 / Size;
8171 for (int i = 0; i < 16; ++i) {
8172 if (Mask[i / Scale] == -1) {
8173 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
8175 const int ZeroMask = 0x80;
      int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
                                         : ZeroMask;
      int V2Idx = Mask[i / Scale] < Size
                      ? ZeroMask
                      : (Mask[i / Scale] - Size) * Scale + i % Scale;
8181 if (Zeroable[i / Scale])
8182 V1Idx = V2Idx = ZeroMask;
8183 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
8184 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
8185 V1InUse |= (ZeroMask != V1Idx);
      V2InUse |= (ZeroMask != V2Idx);
    }
  }

  if (V1InUse)
8191 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
8192 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V1),
8193 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
8195 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
8196 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V2),
8197 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
  // If we need shuffled inputs from both, blend the two.
  SDValue V;
8201 if (V1InUse && V2InUse)
8202 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
  else
    V = V1InUse ? V1 : V2;
8206 // Cast the result back to the correct type.
  return DAG.getNode(ISD::BITCAST, DL, VT, V);
}
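// Example of the byte-mask construction above (illustrative): for a v8i16
// shuffle (Size == 8, Scale == 2) with Mask[3] == 9, destination bytes 6 and 7
// come from element 1 of V2, so V2Mask gets the byte indices 2 and 3 there
// while V1Mask gets the 0x80 "write zero" marker; ORing the two PSHUFB results
// then yields the blended vector.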
8210 /// \brief Generic lowering of 8-lane i16 shuffles.
8212 /// This handles both single-input shuffles and combined shuffle/blends with
8213 /// two inputs. The single input shuffles are immediately delegated to
8214 /// a dedicated lowering routine.
8216 /// The blends are lowered in one of three fundamental ways. If there are few
8217 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
8218 /// of the input is significantly cheaper when lowered as an interleaving of
8219 /// the two inputs, try to interleave them. Otherwise, blend the low and high
8220 /// halves of the inputs separately (making them have relatively few inputs)
8221 /// and then concatenate them.
8222 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8223 const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
8226 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
8227 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
8228 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
8229 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8230 ArrayRef<int> OrigMask = SVOp->getMask();
8231 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
8232 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
8233 MutableArrayRef<int> Mask(MaskStorage);
8235 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
8237 // Whenever we can lower this as a zext, that instruction is strictly faster
8238 // than any alternative.
8239 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;
8243 auto isV1 = [](int M) { return M >= 0 && M < 8; };
8244 auto isV2 = [](int M) { return M >= 8; };
8246 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
8248 if (NumV2Inputs == 0)
8249 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
8251 assert(std::any_of(Mask.begin(), Mask.end(), isV1) &&
8252 "All single-input shuffles should be canonicalized to be V1-input "
  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Shift;
8260 // There are special ways we can lower some single-element blends.
8261 if (NumV2Inputs == 1)
8262 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
8263 Mask, Subtarget, DAG))
8266 // We have different paths for blend lowering, but they all must use the
8267 // *exact* same predicate.
8268 bool IsBlendSupported = Subtarget->hasSSE41();
8269 if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8274 if (SDValue Masked =
          lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Masked;
8278 // Use dedicated unpack instructions for masks that match their pattern.
8279 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
8280 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
8281 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
8282 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
8284 // Try to use byte rotation instructions.
8285 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8286 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
8289 if (SDValue BitBlend =
8290 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
8293 if (SDValue Unpack =
          lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
    return Unpack;
8297 // If we can't directly blend but can use PSHUFB, that will be better as it
8298 // can both shuffle and set up the inefficient blend.
8299 if (!IsBlendSupported && Subtarget->hasSSSE3()) {
8300 bool V1InUse, V2InUse;
    return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
                                      V1InUse, V2InUse);
  }
8305 // We can always bit-blend if we have to so the fallback strategy is to
8306 // decompose into single-input permutes and blends.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
                                                    Mask, DAG);
}
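// For instance (illustrative): <0, 8, 1, 9, 2, 10, 3, 11> is caught by the
// dedicated UNPCKL pattern above, <0, 9, 2, 11, 4, 13, 6, 15> is a pure
// per-element blend that SSE4.1 handles with a single PBLENDW, and masks that
// scatter words arbitrarily across both inputs fall through to the PSHUFB or
// decompose-and-blend fallbacks.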
8311 /// \brief Check whether a compaction lowering can be done by dropping even
8312 /// elements and compute how many times even elements must be dropped.
8314 /// This handles shuffles which take every Nth element where N is a power of
8315 /// two. Example shuffle masks:
8317 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
8318 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
8319 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
8320 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
8321 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
8322 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
8324 /// Any of these lanes can of course be undef.
8326 /// This routine only supports N <= 3.
8327 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
8330 /// \returns N above, or the number of times even elements must be dropped if
8331 /// there is such a number. Otherwise returns zero.
8332 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
8333 // Figure out whether we're looping over two inputs or just one.
8334 bool IsSingleInput = isSingleInputShuffleMask(Mask);
8336 // The modulus for the shuffle vector entries is based on whether this is
8337 // a single input or not.
8338 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
8339 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
8340 "We should only be called with masks with a power-of-2 size!");
8342 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
8344 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
8345 // and 2^3 simultaneously. This is because we may have ambiguity with
8346 // partially undef inputs.
8347 bool ViableForN[3] = {true, true, true};
8349 for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] == -1)
      continue;

8355 bool IsAnyViable = false;
8356 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}
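// Worked example (illustrative): the two-input v16i8 mask
// <0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30> satisfies
// Mask[i] == (i << 1) % 32 for every lane and for no other stride, so this
// returns N = 1; the caller then zeroes the odd bytes of both inputs and emits
// a single PACKUSWB. N = 2 would require two rounds of packing, and so on.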
8379 /// \brief Generic lowering of v16i8 shuffles.
8381 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
8382 /// detect any complexity reducing interleaving. If that doesn't help, it uses
8383 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
8384 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
8386 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8387 const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
8390 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
8391 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
8392 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
8393 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8394 ArrayRef<int> Mask = SVOp->getMask();
8395 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
    return Shift;
8402 // Try to use byte rotation instructions.
8403 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8404 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
8407 // Try to use a zext lowering.
8408 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
8415 // For single-input shuffles, there are some nicer lowering tricks we can use.
8416 if (NumV2Elements == 0) {
8417 // Check for being able to broadcast a single element.
8418 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
8419 Mask, Subtarget, DAG))
8422 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
8423 // Notably, this handles splat and partial-splat shuffles more efficiently.
8424 // However, it only makes sense if the pre-duplication shuffle simplifies
8425 // things significantly. Currently, this means we need to be able to
8426 // express the pre-duplication shuffle as an i16 shuffle.
8428 // FIXME: We should check for other patterns which can be widened into an
8429 // i16 shuffle as well.
8430 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
8431 for (int i = 0; i < 16; i += 2)
        if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
          return false;
      return true;
    };
8437 auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
8440 SmallVector<int, 4> LoInputs;
8441 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
8442 [](int M) { return M >= 0 && M < 8; });
8443 std::sort(LoInputs.begin(), LoInputs.end());
8444 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
8446 SmallVector<int, 4> HiInputs;
8447 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
8448 [](int M) { return M >= 8; });
8449 std::sort(HiInputs.begin(), HiInputs.end());
8450 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
8453 bool TargetLo = LoInputs.size() >= HiInputs.size();
8454 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
8455 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
8457 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
8458 SmallDenseMap<int, int, 8> LaneMap;
8459 for (int I : InPlaceInputs) {
8460 PreDupI16Shuffle[I/2] = I/2;
8463 int j = TargetLo ? 0 : 4, je = j + 4;
8464 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
8465 // Check if j is already a shuffle of this input. This happens when
8466 // there are two adjacent bytes after we move the low one.
8467 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
8468 // If we haven't yet mapped the input, search for a slot into which
8470 while (j < je && PreDupI16Shuffle[j] != -1)
8474 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
8477 // Map this input with the i16 shuffle.
8478 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
8481 // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getNode(
8485 ISD::BITCAST, DL, MVT::v16i8,
8486 DAG.getVectorShuffle(MVT::v8i16, DL,
8487 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
8488 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
8490 // Unpack the bytes to form the i16s that will be shuffled into place.
8491 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
8492 MVT::v16i8, V1, V1);
8494 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
8495 for (int i = 0; i < 16; ++i)
8496 if (Mask[i] != -1) {
8497 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
8498 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
8499 if (PostDupI16Shuffle[i / 2] == -1)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
8502 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
8503 "Conflicting entrties in the original shuffle!");
8506 ISD::BITCAST, DL, MVT::v16i8,
8507 DAG.getVectorShuffle(MVT::v8i16, DL,
8508 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
8509 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }
8515 // Use dedicated unpack instructions for masks that match their pattern.
8516 if (isShuffleEquivalent(V1, V2, Mask,
8517 0, 16, 1, 17, 2, 18, 3, 19,
8518 4, 20, 5, 21, 6, 22, 7, 23))
8519 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V1, V2);
8520 if (isShuffleEquivalent(V1, V2, Mask,
8521 8, 24, 9, 25, 10, 26, 11, 27,
8522 12, 28, 13, 29, 14, 30, 15, 31))
8523 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V1, V2);
8525 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
8526 // with PSHUFB. It is important to do this before we attempt to generate any
8527 // blends but after all of the single-input lowerings. If the single input
8528 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
8529 // want to preserve that and we can DAG combine any longer sequences into
8530 // a PSHUFB in the end. But once we start blending from multiple inputs,
8531 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
8532 // and there are *very* few patterns that would actually be faster than the
8533 // PSHUFB approach because of its ability to zero lanes.
8535 // FIXME: The only exceptions to the above are blends which are exact
8536 // interleavings with direct instructions supporting them. We currently don't
8537 // handle those well here.
8538 if (Subtarget->hasSSSE3()) {
8539 bool V1InUse = false;
8540 bool V2InUse = false;
8542 SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
8543 DAG, V1InUse, V2InUse);
8545 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
8546 // do so. This avoids using them to handle blends-with-zero which is
8547 // important as a single pshufb is significantly faster for that.
8548 if (V1InUse && V2InUse) {
8549 if (Subtarget->hasSSE41())
8550 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
8551 Mask, Subtarget, DAG))
8554 // We can use an unpack to do the blending rather than an or in some
8555 // cases. Even though the or may be (very minorly) more efficient, we
      // prefer this lowering because there are common cases where part of
8557 // the complexity of the shuffles goes away when we do the final blend as
8559 // FIXME: It might be worth trying to detect if the unpack-feeding
8560 // shuffles will both be pshufb, in which case we shouldn't bother with
8562 if (SDValue Unpack =
              lowerVectorShuffleAsUnpack(MVT::v16i8, DL, V1, V2, Mask, DAG))
        return Unpack;
    }

    return PSHUFB;
  }
8570 // There are special ways we can lower some single-element blends.
8571 if (NumV2Elements == 1)
8572 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
8573 Mask, Subtarget, DAG))
8576 if (SDValue BitBlend =
8577 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
8580 // Check whether a compaction lowering can be done. This handles shuffles
8581 // which take every Nth element for some even N. See the helper function for
8584 // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
8586 // rearranging bytes to truncate wide elements.
8587 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
8588 // NumEvenDrops is the power of two stride of the elements. Another way of
8589 // thinking about it is that we need to drop the even elements this many
8590 // times to get the original input.
8591 bool IsSingleInput = isSingleInputShuffleMask(Mask);
8593 // First we need to zero all the dropped bytes.
8594 assert(NumEvenDrops <= 3 &&
8595 "No support for dropping even elements more than 3 times.");
8596 // We use the mask type to pick which bytes are preserved based on how many
8597 // elements are dropped.
8598 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
8599 SDValue ByteClearMask =
8600 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
8601 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
8602 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
8606 // Now pack things back together.
8607 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
8608 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
8609 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
8610 for (int i = 1; i < NumEvenDrops; ++i) {
8611 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }

    return Result;
  }
8618 // Handle multi-input cases by blending single-input shuffles.
8619 if (NumV2Elements > 0)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
                                                      Mask, DAG);
8623 // The fallback path for single-input shuffles widens this into two v8i16
  // vectors with unpacks, shuffles those, and then pulls them back together
  // with a pack.
  SDValue V = V1;
8628 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
8629 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
8630 for (int i = 0; i < 16; ++i)
8632 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
8634 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
8636 SDValue VLoHalf, VHiHalf;
8637 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
8638 // them out and avoid using UNPCK{L,H} to extract the elements of V as
8640 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
8641 [](int M) { return M >= 0 && M % 2 == 1; }) &&
8642 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
8643 [](int M) { return M >= 0 && M % 2 == 1; })) {
8644 // Use a mask to drop the high bytes.
8645 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
8646 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
8647 DAG.getConstant(0x00FF, MVT::v8i16));
8649 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
8650 VHiHalf = DAG.getUNDEF(MVT::v8i16);
8652 // Squash the masks to point directly into VLoHalf.
8653 for (int &M : LoBlendMask)
8656 for (int &M : HiBlendMask)
8660 // Otherwise just unpack the low half of V into VLoHalf and the high half into
8661 // VHiHalf so that we can blend them as i16s.
8662 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8663 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
8664 VHiHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8665 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
8668 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
8669 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
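// Illustrative sketch of the fallback path above: on a pre-SSSE3 target the
// single-input mask <1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14>
// reaches this point, is widened by interleaving V with zero (UNPCKL/UNPCKH),
// is shuffled as two v8i16 vectors in which every byte occupies the low half
// of an i16 lane, and is finally truncated back with PACKUS, which is safe
// because the high byte of every i16 lane is known to be zero.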
8674 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
8676 /// This routine breaks down the specific type of 128-bit shuffle and
8677 /// dispatches to the lowering routines accordingly.
8678 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8679 MVT VT, const X86Subtarget *Subtarget,
8680 SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
8700 /// \brief Helper function to test whether a shuffle mask could be
8701 /// simplified by widening the elements being shuffled.
8703 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
8704 /// leaves it in an unspecified state.
8706 /// NOTE: This must handle normal vector shuffle masks and *target* vector
8707 /// shuffle masks. The latter have the special property of a '-2' representing
/// a zeroed lane of a vector.
8709 static bool canWidenShuffleElements(ArrayRef<int> Mask,
8710 SmallVectorImpl<int> &WidenedMask) {
8711 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    // If both elements are undef, it's trivial.
8713 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
      WidenedMask.push_back(SM_SentinelUndef);
      continue;
    }
8718 // Check for an undef mask and a mask value properly aligned to fit with
8719 // a pair of values. If we find such a case, use the non-undef mask's value.
8720 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
8721 WidenedMask.push_back(Mask[i + 1] / 2);
8724 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
8725 WidenedMask.push_back(Mask[i] / 2);
8729 // When zeroing, we need to spread the zeroing across both lanes to widen.
8730 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
8731 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
8732 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
        WidenedMask.push_back(SM_SentinelZero);
        continue;
      }
      return false;
    }
8739 // Finally check if the two mask values are adjacent and aligned with
8741 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
8742 WidenedMask.push_back(Mask[i] / 2);
    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
8749 assert(WidenedMask.size() == Mask.size() / 2 &&
8750 "Incorrect size of mask after widening the elements!");
8755 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
8757 /// This routine just extracts two subvectors, shuffles them independently, and
8758 /// then concatenates them back together. This should work effectively with all
8759 /// AVX vector shuffle types.
8760 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
8761 SDValue V2, ArrayRef<int> Mask,
8762 SelectionDAG &DAG) {
8763 assert(VT.getSizeInBits() >= 256 &&
8764 "Only for 256-bit or wider vector shuffles!");
8765 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
8766 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
8768 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
8769 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
8771 int NumElements = VT.getVectorNumElements();
8772 int SplitNumElements = NumElements / 2;
8773 MVT ScalarVT = VT.getScalarType();
8774 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
8776 // Rather than splitting build-vectors, just build two narrower build
8777 // vectors. This helps shuffling with splats and zeros.
8778 auto SplitVector = [&](SDValue V) {
8779 while (V.getOpcode() == ISD::BITCAST)
8780 V = V->getOperand(0);
8782 MVT OrigVT = V.getSimpleValueType();
8783 int OrigNumElements = OrigVT.getVectorNumElements();
8784 int OrigSplitNumElements = OrigNumElements / 2;
8785 MVT OrigScalarVT = OrigVT.getScalarType();
8786 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
    SDValue LoV, HiV;
    auto *BV = dyn_cast<BuildVectorSDNode>(V);
    if (!BV) {
8792 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
8793 DAG.getIntPtrConstant(0));
8794 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
8795 DAG.getIntPtrConstant(OrigSplitNumElements));
8798 SmallVector<SDValue, 16> LoOps, HiOps;
8799 for (int i = 0; i < OrigSplitNumElements; ++i) {
8800 LoOps.push_back(BV->getOperand(i));
8801 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
8803 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
8804 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
8806 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
8807 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
8810 SDValue LoV1, HiV1, LoV2, HiV2;
8811 std::tie(LoV1, HiV1) = SplitVector(V1);
8812 std::tie(LoV2, HiV2) = SplitVector(V2);
8814 // Now create two 4-way blends of these half-width vectors.
8815 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
8816 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
8817 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
8818 for (int i = 0; i < SplitNumElements; ++i) {
8819 int M = HalfMask[i];
8820 if (M >= NumElements) {
8821 if (M >= NumElements + SplitNumElements)
8825 V2BlendMask.push_back(M - NumElements);
8826 V1BlendMask.push_back(-1);
8827 BlendMask.push_back(SplitNumElements + i);
8828 } else if (M >= 0) {
8829 if (M >= SplitNumElements)
8833 V2BlendMask.push_back(-1);
8834 V1BlendMask.push_back(M);
8835 BlendMask.push_back(i);
8837 V2BlendMask.push_back(-1);
8838 V1BlendMask.push_back(-1);
8839 BlendMask.push_back(-1);
8843 // Because the lowering happens after all combining takes place, we need to
8844 // manually combine these blend masks as much as possible so that we create
8845 // a minimal number of high-level vector shuffle nodes.
8847 // First try just blending the halves of V1 or V2.
8848 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
8849 return DAG.getUNDEF(SplitVT);
8850 if (!UseLoV2 && !UseHiV2)
8851 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
8852 if (!UseLoV1 && !UseHiV1)
8853 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
8855 SDValue V1Blend, V2Blend;
8856 if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
8860 // We only use half of V1 so map the usage down into the final blend mask.
8861 V1Blend = UseLoV1 ? LoV1 : HiV1;
8862 for (int i = 0; i < SplitNumElements; ++i)
8863 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
8864 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
8866 if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
8870 // We only use half of V2 so map the usage down into the final blend mask.
8871 V2Blend = UseLoV2 ? LoV2 : HiV2;
8872 for (int i = 0; i < SplitNumElements; ++i)
8873 if (BlendMask[i] >= SplitNumElements)
8874 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
8878 SDValue Lo = HalfBlend(LoMask);
8879 SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
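// Illustrative example: a v8f32 shuffle with mask <0, 8, 1, 9, 12, 4, 13, 5>
// splits into a low-half v4f32 shuffle of LoV1/LoV2 with mask <0, 4, 1, 5> and
// a high-half shuffle of HiV1/HiV2 with mask <4, 0, 5, 1>, and the two results
// are concatenated back together; each half references only two of the four
// 128-bit quarters, so no further combining is needed.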
8883 /// \brief Either split a vector in halves or decompose the shuffles and the
8886 /// This is provided as a good fallback for many lowerings of non-single-input
8887 /// shuffles with more than one 128-bit lane. In those cases, we want to select
8888 /// between splitting the shuffle into 128-bit components and stitching those
8889 /// back together vs. extracting the single-input shuffles and blending those
8891 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
8892 SDValue V2, ArrayRef<int> Mask,
8893 SelectionDAG &DAG) {
8894 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
8895 "lower single-input shuffles as it "
8896 "could then recurse on itself.");
8897 int Size = Mask.size();
8899 // If this can be modeled as a broadcast of two elements followed by a blend,
8900 // prefer that lowering. This is especially important because broadcasts can
8901 // often fold with memory operands.
8902 auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
8906 if (V2BroadcastIdx == -1)
8907 V2BroadcastIdx = M - Size;
8908 else if (M - Size != V2BroadcastIdx)
8910 } else if (M >= 0) {
8911 if (V1BroadcastIdx == -1)
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
8918 if (DoBothBroadcast())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                      DAG);
8922 // If the inputs all stem from a single 128-bit lane of each input, then we
8923 // split them rather than blending because the split will decompose to
8924 // unusually few instructions.
8925 int LaneCount = VT.getSizeInBits() / 128;
8926 int LaneSize = Size / LaneCount;
8927 SmallBitVector LaneInputs[2];
8928 LaneInputs[0].resize(LaneCount, false);
8929 LaneInputs[1].resize(LaneCount, false);
8930 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
8933 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
8934 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
8936 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
8937 // that the decomposed single-input shuffles don't end up here.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
8941 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
8942 /// a permutation and blend of those lanes.
8944 /// This essentially blends the out-of-lane inputs to each lane into the lane
8945 /// from a permuted copy of the vector. This lowering strategy results in four
8946 /// instructions in the worst case for a single-input cross lane shuffle which
8947 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
8948 /// of. Special cases for each particular shuffle pattern should be handled
8949 /// prior to trying this lowering.
8950 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
8951 SDValue V1, SDValue V2,
                                                        ArrayRef<int> Mask,
                                                        SelectionDAG &DAG) {
8954 // FIXME: This should probably be generalized for 512-bit vectors as well.
8955 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
8956 int LaneSize = Mask.size() / 2;
8958 // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
8960 // that crosses to another lane.
8961 bool LaneCrossing[2] = {false, false};
8962 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8963 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
8964 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
8965 if (!LaneCrossing[0] || !LaneCrossing[1])
8966 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
8968 if (isSingleInputShuffleMask(Mask)) {
8969 SmallVector<int, 32> FlippedBlendMask;
8970 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8971 FlippedBlendMask.push_back(
          Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                  ? Mask[i]
                                  : Mask[i] % LaneSize +
                                        (i / LaneSize) * LaneSize + Size));
8977 // Flip the vector, and blend the results which should now be in-lane. The
8978 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
8979 // 5 for the high source. The value 3 selects the high half of source 2 and
8980 // the value 2 selects the low half of source 2. We only use source 2 to
8981 // allow folding it into a memory operand.
8982 unsigned PERMMask = 3 | 2 << 4;
8983 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
8984 V1, DAG.getConstant(PERMMask, MVT::i8));
    return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
  }
8988 // This now reduces to two single-input shuffles of V1 and V2 which at worst
8989 // will be handled by the above logic and a blend of the results, much like
8990 // other patterns in AVX.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
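// Illustrative example: the single-input v4f64 mask <3, 2, 1, 0> crosses both
// lanes, so V1 is first flipped with VPERM2X128 (immediate 0x23, swapping its
// two 128-bit halves) and a single in-lane shuffle of V1 and the flipped copy
// with mask <5, 4, 7, 6> then reproduces the original shuffle without any
// further lane crossings.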
8994 /// \brief Handle lowering 2-lane 128-bit shuffles.
8995 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
8996 SDValue V2, ArrayRef<int> Mask,
8997 const X86Subtarget *Subtarget,
8998 SelectionDAG &DAG) {
8999 // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
9004 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
9005 VT.getVectorNumElements() / 2);
  // Check for patterns which can be matched with a single insert of a 128-bit
  // subvector.
9008 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
9009 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
9010 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
9011 DAG.getIntPtrConstant(0));
9012 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
9013 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
9016 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
9017 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
9018 DAG.getIntPtrConstant(0));
9019 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
9020 DAG.getIntPtrConstant(2));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
9024 // Otherwise form a 128-bit permutation.
9025 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
9026 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
9027 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getConstant(PermMask, MVT::i8));
}
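// Example of the VPERM2X128 immediate computed above (illustrative): the
// v4f64 mask <2, 3, 4, 5> takes the high half of V1 followed by the low half
// of V2, so PermMask = (2 / 2) | (4 / 2) << 4 = 0x21; each nibble selects one
// of the four available 128-bit halves (0-1 from V1, 2-3 from V2).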
9031 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
9032 /// shuffling each lane.
9034 /// This will only succeed when the result of fixing the 128-bit lanes results
9035 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
9037 /// the lane crosses early and then use simpler shuffles within each lane.
9039 /// FIXME: It might be worthwhile at some point to support this without
9040 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
9041 /// in x86 only floating point has interesting non-repeating shuffles, and even
9042 /// those are still *marginally* more expensive.
9043 static SDValue lowerVectorShuffleByMerging128BitLanes(
9044 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
9045 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
9046 assert(!isSingleInputShuffleMask(Mask) &&
9047 "This is only useful with multiple inputs.");
9049 int Size = Mask.size();
9050 int LaneSize = 128 / VT.getScalarSizeInBits();
9051 int NumLanes = Size / LaneSize;
9052 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
9054 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
9055 // check whether the in-128-bit lane shuffles share a repeating pattern.
9056 SmallVector<int, 4> Lanes;
9057 Lanes.resize(NumLanes, -1);
9058 SmallVector<int, 4> InLaneMask;
9059 InLaneMask.resize(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int j = i / LaneSize;

    if (Lanes[j] < 0) {
9067 // First entry we've seen for this lane.
9068 Lanes[j] = Mask[i] / LaneSize;
9069 } else if (Lanes[j] != Mask[i] / LaneSize) {
9070 // This doesn't match the lane selected previously!
9074 // Check that within each lane we have a consistent shuffle mask.
9075 int k = i % LaneSize;
9076 if (InLaneMask[k] < 0) {
9077 InLaneMask[k] = Mask[i] % LaneSize;
9078 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
9079 // This doesn't fit a repeating in-lane mask.
9084 // First shuffle the lanes into place.
9085 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
9086 VT.getSizeInBits() / 64);
9087 SmallVector<int, 8> LaneMask;
9088 LaneMask.resize(NumLanes * 2, -1);
9089 for (int i = 0; i < NumLanes; ++i)
9090 if (Lanes[i] >= 0) {
9091 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
9092 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
9095 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
9096 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
9097 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
9099 // Cast it back to the type we actually want.
9100 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
9102 // Now do a simple shuffle that isn't lane crossing.
9103 SmallVector<int, 8> NewMask;
9104 NewMask.resize(Size, -1);
9105 for (int i = 0; i < Size; ++i)
9107 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
9108 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
9109 "Must not introduce lane crosses at this point!");
9111 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
9114 /// \brief Test whether the specified input (0 or 1) is in-place blended by the given mask.
9117 /// This returns true if the elements from a particular input are already in the
9118 /// slots required by the given mask and require no permutation.
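///
/// For example, with the mask <0, 4, 2, 6> input 0 is in place (elements 0 and
/// 2 already occupy their result slots) while input 1 is not.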
9119 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
9120 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
9121 int Size = Mask.size();
9122 for (int i = 0; i < Size; ++i)
9123 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
9129 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
9131 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
9132 /// isn't available.
9133 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9134 const X86Subtarget *Subtarget,
9135 SelectionDAG &DAG) {
9137 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
9138 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
9139 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9140 ArrayRef<int> Mask = SVOp->getMask();
9141 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
9143 SmallVector<int, 4> WidenedMask;
9144 if (canWidenShuffleElements(Mask, WidenedMask))
9145 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
9148 if (isSingleInputShuffleMask(Mask)) {
9149 // Check for being able to broadcast a single element.
9150 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
9151 Mask, Subtarget, DAG))
9154 // Use low duplicate instructions for masks that match their pattern.
9155 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
9156 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
9158 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
9159 // Non-half-crossing single-input shuffles can be lowered with an
9160 // interleaved permutation.
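// Each immediate bit selects the low/high double within its 128-bit lane.
// For example, Mask <1, 0, 3, 2> yields the immediate 0b0101, which swaps the
// two doubles within each lane.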
9161 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
9162 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
9163 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
9164 DAG.getConstant(VPERMILPMask, MVT::i8));
9167 // With AVX2 we have direct support for this permutation.
9168 if (Subtarget->hasAVX2())
9169 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
9170 getV4X86ShuffleImm8ForMask(Mask, DAG));
9172 // Otherwise, fall back.
9173 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
9177 // X86 has dedicated unpack instructions that can handle specific blend
9178 // operations: UNPCKH and UNPCKL.
9179 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
9180 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
9181 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
9182 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
9183 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
9184 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
9185 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
9186 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
9188 // If we have a single input to the zero element, insert that into V1 if we
9189 // can do so cheaply.
9191 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
9192 if (NumV2Elements == 1 && Mask[0] >= 4)
9193 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
9194 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
9197 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
9201 // Check if the blend happens to exactly fit that of SHUFPD.
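// For example, Mask <1, 4, 3, 6> produces the SHUFPD immediate 0b0101: each
// bit selects the low or high double of the corresponding 128-bit lane,
// alternating between the V1 and V2 operands.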
9202 if ((Mask[0] == -1 || Mask[0] < 2) &&
9203 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
9204 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
9205 (Mask[3] == -1 || Mask[3] >= 6)) {
9206 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
9207 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
9208 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
9209 DAG.getConstant(SHUFPDMask, MVT::i8));
9211 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
9212 (Mask[1] == -1 || Mask[1] < 2) &&
9213 (Mask[2] == -1 || Mask[2] >= 6) &&
9214 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
9215 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
9216 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
9217 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
9218 DAG.getConstant(SHUFPDMask, MVT::i8));
9221 // Try to simplify this by merging 128-bit lanes to enable a lane-based
9222 // shuffle. However, if we have AVX2 and either input is already in place,
9223 // we can shuffle the other input across lanes in a single instruction, so
9224 // skip this pattern.
9225 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
9226 isShuffleMaskInputInPlace(1, Mask))))
9227 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9228 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
9231 // If we have AVX2 then we always want to lower with a blend because at v4 we
9232 // can fully permute the elements.
9233 if (Subtarget->hasAVX2())
9234 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
9237 // Otherwise fall back on generic lowering.
9238 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
9241 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
9243 /// This routine is only called when we have AVX2 and thus a reasonable
9244 /// instruction set for v4i64 shuffling.
9245 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9246 const X86Subtarget *Subtarget,
9247 SelectionDAG &DAG) {
9249 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
9250 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
9251 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9252 ArrayRef<int> Mask = SVOp->getMask();
9253 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
9254 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
9256 SmallVector<int, 4> WidenedMask;
9257 if (canWidenShuffleElements(Mask, WidenedMask))
9258 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
9261 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
9265 // Check for being able to broadcast a single element.
9266 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
9267 Mask, Subtarget, DAG))
9270 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
9271 // use lower latency instructions that will operate on both 128-bit lanes.
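// For example, the single-input mask <1, 0, 3, 2> repeats as <1, 0> per lane
// and becomes the v8i32 PSHUFD mask <2, 3, 0, 1> built below.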
9272 SmallVector<int, 2> RepeatedMask;
9273 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
9274 if (isSingleInputShuffleMask(Mask)) {
9275 int PSHUFDMask[] = {-1, -1, -1, -1};
9276 for (int i = 0; i < 2; ++i)
9277 if (RepeatedMask[i] >= 0) {
9278 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
9279 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
9282 ISD::BITCAST, DL, MVT::v4i64,
9283 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
9284 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
9285 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9289 // AVX2 provides a direct instruction for permuting a single input across lanes.
9291 if (isSingleInputShuffleMask(Mask))
9292 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
9293 getV4X86ShuffleImm8ForMask(Mask, DAG));
9295 // Try to use shift instructions.
9297 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
9300 // Use dedicated unpack instructions for masks that match their pattern.
9301 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
9302 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
9303 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
9304 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
9305 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
9306 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
9307 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
9308 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
9310 // Try to simplify this by merging 128-bit lanes to enable a lane-based
9311 // shuffle. However, if we have AVX2 and either input is already in place,
9312 // we can shuffle the other input across lanes in a single instruction, so
9313 // skip this pattern.
9314 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
9315 isShuffleMaskInputInPlace(1, Mask))))
9316 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9317 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
9320 // Otherwise fall back on generic blend lowering.
9321 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
9325 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
9327 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
9328 /// isn't available.
9329 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9330 const X86Subtarget *Subtarget,
9331 SelectionDAG &DAG) {
9333 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
9334 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
9335 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9336 ArrayRef<int> Mask = SVOp->getMask();
9337 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9339 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
9343 // Check for being able to broadcast a single element.
9344 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
9345 Mask, Subtarget, DAG))
9348 // If the shuffle mask is repeated in each 128-bit lane, we have many more
9349 // options to efficiently lower the shuffle.
9350 SmallVector<int, 4> RepeatedMask;
9351 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
9352 assert(RepeatedMask.size() == 4 &&
9353 "Repeated masks must be half the mask width!");
9355 // Use even/odd duplicate instructions for masks that match their pattern.
9356 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
9357 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
9358 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
9359 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
9361 if (isSingleInputShuffleMask(Mask))
9362 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
9363 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
9365 // Use dedicated unpack instructions for masks that match their pattern.
9366 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
9367 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
9368 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
9369 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
9370 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
9371 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
9372 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
9373 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
9375 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
9376 // have already handled any direct blends. We also need to squash the
9377 // repeated mask into a simulated v4f32 mask.
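// For example, the repeated mask <0, 2, 8, 10> (V2 elements are offset by the
// full mask width) is squashed into the v4f32 two-input mask <0, 2, 4, 6>.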
9378 for (int i = 0; i < 4; ++i)
9379 if (RepeatedMask[i] >= 8)
9380 RepeatedMask[i] -= 4;
9381 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
9384 // If we have a single-input shuffle with different shuffle patterns in the
9385 // two 128-bit lanes, use a variable-mask VPERMILPS.
9386 if (isSingleInputShuffleMask(Mask)) {
9387 SDValue VPermMask[8];
9388 for (int i = 0; i < 8; ++i)
9389 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
9390 : DAG.getConstant(Mask[i], MVT::i32);
9391 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
9393 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
9394 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
9396 if (Subtarget->hasAVX2())
9397 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
9398 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
9399 DAG.getNode(ISD::BUILD_VECTOR, DL,
9400 MVT::v8i32, VPermMask)),
9403 // Otherwise, fall back.
9404 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
9408 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
9410 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9411 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
9414 // If we have AVX2 then we always want to lower with a blend because at v8 we
9415 // can fully permute the elements.
9416 if (Subtarget->hasAVX2())
9417 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
9420 // Otherwise fall back on generic lowering.
9421 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
9424 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
9426 /// This routine is only called when we have AVX2 and thus a reasonable
9427 /// instruction set for v8i32 shuffling.
9428 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9429 const X86Subtarget *Subtarget,
9430 SelectionDAG &DAG) {
9432 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
9433 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
9434 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9435 ArrayRef<int> Mask = SVOp->getMask();
9436 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9437 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
9439 // Whenever we can lower this as a zext, that instruction is strictly faster
9440 // than any alternative. It also allows us to fold memory operands into the
9441 // shuffle in many cases.
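// For example, with a zero V2 the mask <0, 8, 1, 8, 2, 8, 3, 8> is recognized
// as a zero-extension of the low four 32-bit elements into 64-bit slots
// (essentially a PMOVZXDQ).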
9442 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
9443 Mask, Subtarget, DAG))
9446 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
9450 // Check for being able to broadcast a single element.
9451 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
9452 Mask, Subtarget, DAG))
9455 // If the shuffle mask is repeated in each 128-bit lane, we can use more
9456 // efficient instructions that mirror the shuffles across the two 128-bit lanes.
9458 SmallVector<int, 4> RepeatedMask;
9459 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
9460 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
9461 if (isSingleInputShuffleMask(Mask))
9462 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
9463 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
9465 // Use dedicated unpack instructions for masks that match their pattern.
9466 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
9467 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
9468 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
9469 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
9470 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
9471 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
9472 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
9473 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
9476 // Try to use shift instructions.
9478 lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
9481 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9482 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
9485 // If the shuffle patterns aren't repeated but it is a single input, directly
9486 // generate a cross-lane VPERMD instruction.
9487 if (isSingleInputShuffleMask(Mask)) {
9488 SDValue VPermMask[8];
9489 for (int i = 0; i < 8; ++i)
9490 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
9491 : DAG.getConstant(Mask[i], MVT::i32);
9493 X86ISD::VPERMV, DL, MVT::v8i32,
9494 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
9497 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
9499 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9500 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
9503 // Otherwise fall back on generic blend lowering.
9504 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
9508 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
9510 /// This routine is only called when we have AVX2 and thus a reasonable
9511 /// instruction set for v16i16 shuffling.
9512 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9513 const X86Subtarget *Subtarget,
9514 SelectionDAG &DAG) {
9516 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
9517 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
9518 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9519 ArrayRef<int> Mask = SVOp->getMask();
9520 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9521 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
9523 // Whenever we can lower this as a zext, that instruction is strictly faster
9524 // than any alternative. It also allows us to fold memory operands into the
9525 // shuffle in many cases.
9526 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
9527 Mask, Subtarget, DAG))
9530 // Check for being able to broadcast a single element.
9531 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
9532 Mask, Subtarget, DAG))
9535 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
9539 // Use dedicated unpack instructions for masks that match their pattern.
9540 if (isShuffleEquivalent(V1, V2, Mask,
9541 // First 128-bit lane:
9542 0, 16, 1, 17, 2, 18, 3, 19,
9543 // Second 128-bit lane:
9544 8, 24, 9, 25, 10, 26, 11, 27))
9545 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
9546 if (isShuffleEquivalent(V1, V2, Mask,
9547 // First 128-bit lane:
9548 4, 20, 5, 21, 6, 22, 7, 23,
9549 // Second 128-bit lane:
9550 12, 28, 13, 29, 14, 30, 15, 31))
9551 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
9553 // Try to use shift instructions.
9555 lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
9558 // Try to use byte rotation instructions.
9559 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9560 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
9563 if (isSingleInputShuffleMask(Mask)) {
9564 // There are no generalized cross-lane shuffle operations available on i16 element types.
9566 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
9567 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
9570 SDValue PSHUFBMask[32];
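// Build the per-byte PSHUFB control: each 16-bit element index M (taken
// relative to its own 128-bit half, since PSHUFB cannot cross lanes) expands
// into the byte selectors 2*M and 2*M+1.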
9571 for (int i = 0; i < 16; ++i) {
9572 if (Mask[i] == -1) {
9573 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
9577 int M = i < 8 ? Mask[i] : Mask[i] - 8;
9578 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
9579 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
9580 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
9583 ISD::BITCAST, DL, MVT::v16i16,
9585 X86ISD::PSHUFB, DL, MVT::v32i8,
9586 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
9587 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
9590 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
9592 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9593 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
9596 // Otherwise fall back on generic lowering.
9597 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
9600 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
9602 /// This routine is only called when we have AVX2 and thus a reasonable
9603 /// instruction set for v32i8 shuffling.
9604 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9605 const X86Subtarget *Subtarget,
9606 SelectionDAG &DAG) {
9608 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
9609 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
9610 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9611 ArrayRef<int> Mask = SVOp->getMask();
9612 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
9613 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
9615 // Whenever we can lower this as a zext, that instruction is strictly faster
9616 // than any alternative. It also allows us to fold memory operands into the
9617 // shuffle in many cases.
9618 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
9619 Mask, Subtarget, DAG))
9622 // Check for being able to broadcast a single element.
9623 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
9624 Mask, Subtarget, DAG))
9627 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
9631 // Use dedicated unpack instructions for masks that match their pattern.
9632 // Note that these are repeated 128-bit lane unpacks, not unpacks across the whole 256-bit vector.
9634 if (isShuffleEquivalent(
9636 // First 128-bit lane:
9637 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
9638 // Second 128-bit lane:
9639 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
9640 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
9641 if (isShuffleEquivalent(
9643 // First 128-bit lane:
9644 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
9645 // Second 128-bit lane:
9646 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
9647 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
9649 // Try to use shift instructions.
9651 lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
9654 // Try to use byte rotation instructions.
9655 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9656 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
9659 if (isSingleInputShuffleMask(Mask)) {
9660 // There are no generalized cross-lane shuffle operations available on i8 element types.
9662 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
9663 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
9666 SDValue PSHUFBMask[32];
9667 for (int i = 0; i < 32; ++i)
9670 ? DAG.getUNDEF(MVT::i8)
9671 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
9674 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
9675 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
9678 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
9680 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
9681 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
9684 // Otherwise fall back on generic lowering.
9685 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
9688 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
9690 /// This routine either breaks down the specific type of a 256-bit x86 vector
9691 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
9692 /// together based on the available instructions.
9693 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9694 MVT VT, const X86Subtarget *Subtarget,
9695 SelectionDAG &DAG) {
9697 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9698 ArrayRef<int> Mask = SVOp->getMask();
9700 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
9701 // check for those subtargets here and avoid much of the subtarget querying in
9702 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
9703 // ability to manipulate a 256-bit vector with integer types. Since we'll use
9704 // floating point types there eventually, just immediately cast everything to
9705 // a float and operate entirely in that domain.
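// For example, a v8i32 shuffle on AVX1 is bitcast to v8f32, shuffled in the
// floating point domain, and bitcast back; v16i16 and v32i8 shuffles are
// instead split into 128-bit halves below.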
9706 if (VT.isInteger() && !Subtarget->hasAVX2()) {
9707 int ElementBits = VT.getScalarSizeInBits();
9708 if (ElementBits < 32)
9709 // No floating point type available, decompose into 128-bit vectors.
9710 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
9712 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
9713 VT.getVectorNumElements());
9714 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
9715 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
9716 return DAG.getNode(ISD::BITCAST, DL, VT,
9717 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
9720 switch (VT.SimpleTy) {
9722 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9724 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9726 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9728 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9730 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
9732 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
9735 llvm_unreachable("Not a valid 256-bit x86 vector type!");
9739 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
9740 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9741 const X86Subtarget *Subtarget,
9742 SelectionDAG &DAG) {
9744 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
9745 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
9746 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9747 ArrayRef<int> Mask = SVOp->getMask();
9748 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9750 // X86 has dedicated unpack instructions that can handle specific blend
9751 // operations: UNPCKH and UNPCKL.
9752 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
9753 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
9754 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
9755 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
9757 // FIXME: Implement direct support for this type!
9758 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
9761 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
9762 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9763 const X86Subtarget *Subtarget,
9764 SelectionDAG &DAG) {
9766 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
9767 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
9768 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9769 ArrayRef<int> Mask = SVOp->getMask();
9770 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9772 // Use dedicated unpack instructions for masks that match their pattern.
9773 if (isShuffleEquivalent(V1, V2, Mask,
9774 0, 16, 1, 17, 4, 20, 5, 21,
9775 8, 24, 9, 25, 12, 28, 13, 29))
9776 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
9777 if (isShuffleEquivalent(V1, V2, Mask,
9778 2, 18, 3, 19, 6, 22, 7, 23,
9779 10, 26, 11, 27, 14, 30, 15, 31))
9780 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
9782 // FIXME: Implement direct support for this type!
9783 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
9786 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
9787 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9788 const X86Subtarget *Subtarget,
9789 SelectionDAG &DAG) {
9791 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
9792 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
9793 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9794 ArrayRef<int> Mask = SVOp->getMask();
9795 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9797 // X86 has dedicated unpack instructions that can handle specific blend
9798 // operations: UNPCKH and UNPCKL.
9799 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
9800 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
9801 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
9802 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
9804 // FIXME: Implement direct support for this type!
9805 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
9808 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
9809 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9810 const X86Subtarget *Subtarget,
9811 SelectionDAG &DAG) {
9813 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
9814 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
9815 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9816 ArrayRef<int> Mask = SVOp->getMask();
9817 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9819 // Use dedicated unpack instructions for masks that match their pattern.
9820 if (isShuffleEquivalent(V1, V2, Mask,
9821 0, 16, 1, 17, 4, 20, 5, 21,
9822 8, 24, 9, 25, 12, 28, 13, 29))
9823 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
9824 if (isShuffleEquivalent(V1, V2, Mask,
9825 2, 18, 3, 19, 6, 22, 7, 23,
9826 10, 26, 11, 27, 14, 30, 15, 31))
9827 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
9829 // FIXME: Implement direct support for this type!
9830 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
9833 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
9834 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9835 const X86Subtarget *Subtarget,
9836 SelectionDAG &DAG) {
9838 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
9839 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
9840 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9841 ArrayRef<int> Mask = SVOp->getMask();
9842 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
9843 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
9845 // FIXME: Implement direct support for this type!
9846 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
9849 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
9850 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9851 const X86Subtarget *Subtarget,
9852 SelectionDAG &DAG) {
9854 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
9855 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
9856 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9857 ArrayRef<int> Mask = SVOp->getMask();
9858 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
9859 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
9861 // FIXME: Implement direct support for this type!
9862 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
9865 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
9867 /// This routine either breaks down the specific type of a 512-bit x86 vector
9868 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
9869 /// together based on the available instructions.
9870 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9871 MVT VT, const X86Subtarget *Subtarget,
9872 SelectionDAG &DAG) {
9874 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9875 ArrayRef<int> Mask = SVOp->getMask();
9876 assert(Subtarget->hasAVX512() &&
9877 "Cannot lower 512-bit vectors w/ basic ISA!");
9879 // Check for being able to broadcast a single element.
9880 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
9881 Mask, Subtarget, DAG))
9884 // Dispatch to each element type for lowering. If we don't have support for
9885 // specific element type shuffles at 512 bits, immediately split them and
9886 // lower them. Each lowering routine of a given type is allowed to assume that
9887 // the requisite ISA extensions for that element type are available.
9888 switch (VT.SimpleTy) {
9890 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9892 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9894 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9896 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9898 if (Subtarget->hasBWI())
9899 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
9902 if (Subtarget->hasBWI())
9903 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
9907 llvm_unreachable("Not a valid 512-bit x86 vector type!");
9910 // Otherwise fall back on splitting.
9911 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
9914 /// \brief Top-level lowering for x86 vector shuffles.
9916 /// This handles decomposition, canonicalization, and lowering of all x86
9917 /// vector shuffles. Most of the specific lowering strategies are encapsulated
9918 /// above in helper routines. The canonicalization attempts to widen shuffles
9919 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
9920 /// s.t. only one of the two inputs needs to be tested, etc.
9921 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
9922 SelectionDAG &DAG) {
9923 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9924 ArrayRef<int> Mask = SVOp->getMask();
9925 SDValue V1 = Op.getOperand(0);
9926 SDValue V2 = Op.getOperand(1);
9927 MVT VT = Op.getSimpleValueType();
9928 int NumElements = VT.getVectorNumElements();
9931 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
9933 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
9934 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
9935 if (V1IsUndef && V2IsUndef)
9936 return DAG.getUNDEF(VT);
9938 // When we create a shuffle node we put the UNDEF node in the second operand,
9939 // but in some cases the first operand may be transformed to UNDEF.
9940 // In this case we should just commute the node.
9942 return DAG.getCommutedVectorShuffle(*SVOp);
9944 // Check for non-undef masks pointing at an undef vector and make the masks
9945 // undef as well. This makes it easier to match the shuffle based solely on the mask.
9949 if (M >= NumElements) {
9950 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
9951 for (int &M : NewMask)
9952 if (M >= NumElements)
9954 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
9957 // We actually see shuffles that are entirely re-arrangements of a set of
9958 // zero inputs. This mostly happens while decomposing complex shuffles into
9959 // simple ones. Directly lower these as a buildvector of zeros.
9960 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9962 return getZeroVector(VT, Subtarget, DAG, dl);
9964 // Try to collapse shuffles into using a vector type with fewer elements but
9965 // wider element types. We cap this to not form integers or floating point
9966 // elements wider than 64 bits, but it might be interesting to form i128
9967 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
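// For example, the v4i32 mask <0, 1, 6, 7> widens to the v2i64 mask <0, 3>.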
9968 SmallVector<int, 16> WidenedMask;
9969 if (VT.getScalarSizeInBits() < 64 &&
9970 canWidenShuffleElements(Mask, WidenedMask)) {
9971 MVT NewEltVT = VT.isFloatingPoint()
9972 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
9973 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
9974 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
9975 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
9977 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
9978 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
9979 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
9980 return DAG.getNode(ISD::BITCAST, dl, VT,
9981 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
9985 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
9986 for (int M : SVOp->getMask())
9989 else if (M < NumElements)
9994 // Commute the shuffle as needed such that more elements come from V1 than
9995 // V2. This allows us to match the shuffle pattern strictly on how many
9996 // elements come from V1 without handling the symmetric cases.
9997 if (NumV2Elements > NumV1Elements)
9998 return DAG.getCommutedVectorShuffle(*SVOp);
10000 // When the number of V1 and V2 elements are the same, try to minimize the
10001 // number of uses of V2 in the low half of the vector. When that is tied,
10002 // ensure that the sum of indices for V1 is equal to or lower than the sum
10003 // of indices for V2. When those are equal, try to ensure that the number of odd
10004 // indices for V1 is lower than the number of odd indices for V2.
10005 if (NumV1Elements == NumV2Elements) {
10006 int LowV1Elements = 0, LowV2Elements = 0;
10007 for (int M : SVOp->getMask().slice(0, NumElements / 2))
10008 if (M >= NumElements)
10012 if (LowV2Elements > LowV1Elements) {
10013 return DAG.getCommutedVectorShuffle(*SVOp);
10014 } else if (LowV2Elements == LowV1Elements) {
10015 int SumV1Indices = 0, SumV2Indices = 0;
10016 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
10017 if (SVOp->getMask()[i] >= NumElements)
10019 else if (SVOp->getMask()[i] >= 0)
10021 if (SumV2Indices < SumV1Indices) {
10022 return DAG.getCommutedVectorShuffle(*SVOp);
10023 } else if (SumV2Indices == SumV1Indices) {
10024 int NumV1OddIndices = 0, NumV2OddIndices = 0;
10025 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
10026 if (SVOp->getMask()[i] >= NumElements)
10027 NumV2OddIndices += i % 2;
10028 else if (SVOp->getMask()[i] >= 0)
10029 NumV1OddIndices += i % 2;
10030 if (NumV2OddIndices < NumV1OddIndices)
10031 return DAG.getCommutedVectorShuffle(*SVOp);
10036 // For each vector width, delegate to a specialized lowering routine.
10037 if (VT.getSizeInBits() == 128)
10038 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
10040 if (VT.getSizeInBits() == 256)
10041 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
10043 // Dispatch 512-bit vectors to the AVX-512 lowering routine; element types
10044 // without direct support are split there. FIXME: Complete AVX-512 support!
10045 if (VT.getSizeInBits() == 512)
10046 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
10048 llvm_unreachable("Unimplemented!");
10051 // This function assumes its argument is a BUILD_VECTOR of constants or
10052 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is true.
10054 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
10055 unsigned &MaskValue) {
10057 unsigned NumElems = BuildVector->getNumOperands();
10058 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
10059 unsigned NumLanes = (NumElems - 1) / 8 + 1;
10060 unsigned NumElemsInLane = NumElems / NumLanes;
10062 // Blend for v16i16 should be symmetric for both lanes.
10063 for (unsigned i = 0; i < NumElemsInLane; ++i) {
10064 SDValue EltCond = BuildVector->getOperand(i);
10065 SDValue SndLaneEltCond =
10066 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
10068 int Lane1Cond = -1, Lane2Cond = -1;
10069 if (isa<ConstantSDNode>(EltCond))
10070 Lane1Cond = !isZero(EltCond);
10071 if (isa<ConstantSDNode>(SndLaneEltCond))
10072 Lane2Cond = !isZero(SndLaneEltCond);
10074 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
10075 // Lane1Cond != 0 means we want the first argument.
10076 // Lane1Cond == 0 means we want the second argument.
10077 // The encoding of this argument is 0 for the first argument, 1
10078 // for the second. Therefore, invert the condition.
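// For example, the v8i32 condition <-1, 0, -1, 0, -1, 0, -1, 0> yields
// MaskValue = 0xAA: a set bit selects the second operand for that element.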
10079 MaskValue |= !Lane1Cond << i;
10080 else if (Lane1Cond < 0)
10081 MaskValue |= !Lane2Cond << i;
10088 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
10089 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
10090 const X86Subtarget *Subtarget,
10091 SelectionDAG &DAG) {
10092 SDValue Cond = Op.getOperand(0);
10093 SDValue LHS = Op.getOperand(1);
10094 SDValue RHS = Op.getOperand(2);
10096 MVT VT = Op.getSimpleValueType();
10098 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10100 auto *CondBV = cast<BuildVectorSDNode>(Cond);
10102 // Only non-legal VSELECTs reach this lowering; convert those into generic
10103 // shuffles and re-use the shuffle lowering path for blends.
10104 SmallVector<int, 32> Mask;
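// Encode the condition as a shuffle mask: a constant-true element selects LHS
// (index i), a constant-zero element selects RHS (index i + Size), and an
// undef condition element becomes undef (-1).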
10105 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
10106 SDValue CondElt = CondBV->getOperand(i);
10108 isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
10110 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
10113 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
10114 // A vselect where all conditions and data are constants can be optimized into
10115 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
10116 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
10117 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
10118 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
10121 // Try to lower this to a blend-style vector shuffle. This can handle all
10122 // constant condition cases.
10123 SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
10124 if (BlendOp.getNode())
10127 // Variable blends are only legal from SSE4.1 onward.
10128 if (!Subtarget->hasSSE41())
10131 // Some types for vselect were previously set to Expand, not Legal or
10132 // Custom. Return an empty SDValue so we fall-through to Expand, after
10133 // the Custom lowering phase.
10134 MVT VT = Op.getSimpleValueType();
10135 switch (VT.SimpleTy) {
10140 if (Subtarget->hasBWI() && Subtarget->hasVLX())
10145 // We couldn't create a "Blend with immediate" node.
10146 // This node should still be legal, but we'll have to emit a blendv* instruction.
10151 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
10152 MVT VT = Op.getSimpleValueType();
10155 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
10158 if (VT.getSizeInBits() == 8) {
10159 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
10160 Op.getOperand(0), Op.getOperand(1));
10161 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
10162 DAG.getValueType(VT));
10163 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
10166 if (VT.getSizeInBits() == 16) {
10167 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
10168 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
10170 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
10171 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
10172 DAG.getNode(ISD::BITCAST, dl,
10175 Op.getOperand(1)));
10176 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
10177 Op.getOperand(0), Op.getOperand(1));
10178 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
10179 DAG.getValueType(VT));
10180 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
10183 if (VT == MVT::f32) {
10184 // EXTRACTPS outputs to a GPR32 register, which will require a movd to copy
10185 // the result back to an FR32 register. It's only worth matching if the
10186 // result has a single use which is a store or a bitcast to i32. And in
10187 // the case of a store, it's not worth it if the index is a constant 0,
10188 // because a MOVSSmr can be used instead, which is smaller and faster.
10189 if (!Op.hasOneUse())
10191 SDNode *User = *Op.getNode()->use_begin();
10192 if ((User->getOpcode() != ISD::STORE ||
10193 (isa<ConstantSDNode>(Op.getOperand(1)) &&
10194 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
10195 (User->getOpcode() != ISD::BITCAST ||
10196 User->getValueType(0) != MVT::i32))
10198 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
10199 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
10202 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
10205 if (VT == MVT::i32 || VT == MVT::i64) {
10206 // ExtractPS/pextrq work with a constant index.
10207 if (isa<ConstantSDNode>(Op.getOperand(1)))
10213 /// Extract one bit from mask vector, like v16i1 or v8i1.
10214 /// AVX-512 feature.
10216 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
10217 SDValue Vec = Op.getOperand(0);
10219 MVT VecVT = Vec.getSimpleValueType();
10220 SDValue Idx = Op.getOperand(1);
10221 MVT EltVT = Op.getSimpleValueType();
10223 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
10224 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
10225 "Unexpected vector type in ExtractBitFromMaskVector");
10227 // A variable index can't be handled in mask registers,
10228 // so extend the vector to VR512.
10229 if (!isa<ConstantSDNode>(Idx)) {
10230 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
10231 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
10232 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
10233 ExtVT.getVectorElementType(), Ext, Idx);
10234 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
10237 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
10238 const TargetRegisterClass* rc = getRegClassFor(VecVT);
10239 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
10240 rc = getRegClassFor(MVT::v16i1);
10241 unsigned MaxSift = rc->getSize()*8 - 1;
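// Move the requested bit into bit 0 of the mask register: shift it left so it
// becomes the most significant bit, then shift the whole register right by the
// register width minus one, clearing everything else.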
10242 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
10243 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
10244 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
10245 DAG.getConstant(MaxSift, MVT::i8));
10246 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
10247 DAG.getIntPtrConstant(0));
10251 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10252 SelectionDAG &DAG) const {
10254 SDValue Vec = Op.getOperand(0);
10255 MVT VecVT = Vec.getSimpleValueType();
10256 SDValue Idx = Op.getOperand(1);
10258 if (Op.getSimpleValueType() == MVT::i1)
10259 return ExtractBitFromMaskVector(Op, DAG);
10261 if (!isa<ConstantSDNode>(Idx)) {
10262 if (VecVT.is512BitVector() ||
10263 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
10264 VecVT.getVectorElementType().getSizeInBits() == 32)) {
10267 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
10268 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
10269 MaskEltVT.getSizeInBits());
10271 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
10272 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
10273 getZeroVector(MaskVT, Subtarget, DAG, dl),
10274 Idx, DAG.getConstant(0, getPointerTy()));
10275 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
10276 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
10277 Perm, DAG.getConstant(0, getPointerTy()));
10282 // If this is a 256-bit vector result, first extract the 128-bit vector and
10283 // then extract the element from the 128-bit vector.
10284 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
10286 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
10287 // Get the 128-bit vector.
10288 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
10289 MVT EltVT = VecVT.getVectorElementType();
10291 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
10293 //if (IdxVal >= NumElems/2)
10294 // IdxVal -= NumElems/2;
10295 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
10296 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
10297 DAG.getConstant(IdxVal, MVT::i32));
10300 assert(VecVT.is128BitVector() && "Unexpected vector length");
10302 if (Subtarget->hasSSE41()) {
10303 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
10308 MVT VT = Op.getSimpleValueType();
10309 // TODO: handle v16i8.
10310 if (VT.getSizeInBits() == 16) {
10311 SDValue Vec = Op.getOperand(0);
10312 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
10314 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
10315 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
10316 DAG.getNode(ISD::BITCAST, dl,
10318 Op.getOperand(1)));
10319 // Transform it so it matches pextrw, which produces a 32-bit result.
10320 MVT EltVT = MVT::i32;
10321 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
10322 Op.getOperand(0), Op.getOperand(1));
10323 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
10324 DAG.getValueType(VT));
10325 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
10328 if (VT.getSizeInBits() == 32) {
10329 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
10333 // SHUFPS the element to the lowest double word, then movss.
10334 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
10335 MVT VVT = Op.getOperand(0).getSimpleValueType();
10336 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
10337 DAG.getUNDEF(VVT), Mask);
10338 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
10339 DAG.getIntPtrConstant(0));
10342 if (VT.getSizeInBits() == 64) {
10343 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
10344 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
10345 // to match extract_elt for f64.
10346 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
10350 // UNPCKHPD the element to the lowest double word, then movsd.
10351 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
10352 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
10353 int Mask[2] = { 1, -1 };
10354 MVT VVT = Op.getOperand(0).getSimpleValueType();
10355 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
10356 DAG.getUNDEF(VVT), Mask);
10357 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
10358 DAG.getIntPtrConstant(0));
10364 /// Insert one bit to mask vector, like v16i1 or v8i1.
10365 /// AVX-512 feature.
10367 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
10369 SDValue Vec = Op.getOperand(0);
10370 SDValue Elt = Op.getOperand(1);
10371 SDValue Idx = Op.getOperand(2);
10372 MVT VecVT = Vec.getSimpleValueType();
10374 if (!isa<ConstantSDNode>(Idx)) {
10375 // Non-constant index. Extend source and destination,
10376 // insert element and then truncate the result.
10377 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
10378 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
10379 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
10380 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
10381 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
10382 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
10385 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
10386 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
10387 if (Vec.getOpcode() == ISD::UNDEF)
10388 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
10389 DAG.getConstant(IdxVal, MVT::i8));
10390 const TargetRegisterClass* rc = getRegClassFor(VecVT);
10391 unsigned MaxSift = rc->getSize()*8 - 1;
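// Isolate bit 0 of the scalar element and move it to position IdxVal: shift
// left to the top bit (dropping any higher bits), shift right back into place,
// and then OR the result into the existing mask.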
10392 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
10393 DAG.getConstant(MaxSift, MVT::i8));
10394 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
10395 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
10396 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
10399 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10400 SelectionDAG &DAG) const {
10401 MVT VT = Op.getSimpleValueType();
10402 MVT EltVT = VT.getVectorElementType();
10404 if (EltVT == MVT::i1)
10405 return InsertBitToMaskVector(Op, DAG);
10408 SDValue N0 = Op.getOperand(0);
10409 SDValue N1 = Op.getOperand(1);
10410 SDValue N2 = Op.getOperand(2);
10411 if (!isa<ConstantSDNode>(N2))
10413 auto *N2C = cast<ConstantSDNode>(N2);
10414 unsigned IdxVal = N2C->getZExtValue();
10416 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
10417 // into that, and then insert the subvector back into the result.
10418 if (VT.is256BitVector() || VT.is512BitVector()) {
10419 // Get the desired 128-bit vector half.
10420 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
10422 // Insert the element into the desired half.
10423 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
10424 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
10426 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
10427 DAG.getConstant(IdxIn128, MVT::i32));
10429 // Insert the changed part back to the 256-bit vector
10430 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
10432 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
10434 if (Subtarget->hasSSE41()) {
10435 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
10437 if (VT == MVT::v8i16) {
10438 Opc = X86ISD::PINSRW;
10440 assert(VT == MVT::v16i8);
10441 Opc = X86ISD::PINSRB;
10444 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second argument.
10446 if (N1.getValueType() != MVT::i32)
10447 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
10448 if (N2.getValueType() != MVT::i32)
10449 N2 = DAG.getIntPtrConstant(IdxVal);
10450 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
10453 if (EltVT == MVT::f32) {
10454 // Bits [7:6] of the constant are the source select. This will always be
10455 // zero here. The DAG Combiner may combine an extract_elt index into these
10457 // bits. For example (insert (extract, 3), 2) could be matched by folding
10459 // the '3' into bits [7:6] of X86ISD::INSERTPS.
10460 // Bits [5:4] of the constant are the destination select. This is the
10461 // value of the incoming immediate.
10462 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
10463 // combine either bitwise AND or insert of float 0.0 to set these bits.
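// For example, inserting into element 2 encodes IdxVal << 4 == 0x20:
// destination select 2 in bits [5:4], source select 0, and no zeroed elements.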
10464 N2 = DAG.getIntPtrConstant(IdxVal << 4);
10465 // Create this as a scalar-to-vector.
10466 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
10467 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
10470 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
10471 // PINSR* works with constant index.
10476 if (EltVT == MVT::i8)
10479 if (EltVT.getSizeInBits() == 16) {
10480 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
10481 // as its second argument.
10482 if (N1.getValueType() != MVT::i32)
10483 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
10484 if (N2.getValueType() != MVT::i32)
10485 N2 = DAG.getIntPtrConstant(IdxVal);
10486 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
10491 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
10493 MVT OpVT = Op.getSimpleValueType();
10495 // If this is a 256-bit vector result, first insert into a 128-bit
10496 // vector and then insert into the 256-bit vector.
10497 if (!OpVT.is128BitVector()) {
10498 // Insert into a 128-bit vector.
10499 unsigned SizeFactor = OpVT.getSizeInBits()/128;
10500 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
10501 OpVT.getVectorNumElements() / SizeFactor);
10503 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
10505 // Insert the 128-bit vector.
10506 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
10509 if (OpVT == MVT::v1i64 &&
10510 Op.getOperand(0).getValueType() == MVT::i64)
10511 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
10513 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
10514 assert(OpVT.is128BitVector() && "Expected an SSE type!");
10515 return DAG.getNode(ISD::BITCAST, dl, OpVT,
10516 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
10519 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
10520 // a simple subregister reference or explicit instructions to grab
10521 // upper bits of a vector.
10522 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
10523 SelectionDAG &DAG) {
10525 SDValue In = Op.getOperand(0);
10526 SDValue Idx = Op.getOperand(1);
10527 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
10528 MVT ResVT = Op.getSimpleValueType();
10529 MVT InVT = In.getSimpleValueType();
10531 if (Subtarget->hasFp256()) {
10532 if (ResVT.is128BitVector() &&
10533 (InVT.is256BitVector() || InVT.is512BitVector()) &&
10534 isa<ConstantSDNode>(Idx)) {
10535 return Extract128BitVector(In, IdxVal, DAG, dl);
10537 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
10538 isa<ConstantSDNode>(Idx)) {
10539 return Extract256BitVector(In, IdxVal, DAG, dl);
10545 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
10546 // simple superregister reference or explicit instructions to insert
10547 // the upper bits of a vector.
10548 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
10549 SelectionDAG &DAG) {
10550 if (!Subtarget->hasAVX())
10554 SDValue Vec = Op.getOperand(0);
10555 SDValue SubVec = Op.getOperand(1);
10556 SDValue Idx = Op.getOperand(2);
10558 if (!isa<ConstantSDNode>(Idx))
10561 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
10562 MVT OpVT = Op.getSimpleValueType();
10563 MVT SubVecVT = SubVec.getSimpleValueType();
10565 // Fold two 16-byte subvector loads into one 32-byte load:
10566 // (insert_subvector (insert_subvector undef, (load addr), 0),
10567 // (load addr + 16), Elts/2)
10569 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
10570 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
10571 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
10572 !Subtarget->isUnalignedMem32Slow()) {
10573 SDValue SubVec2 = Vec.getOperand(1);
10574 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
10575 if (Idx2->getZExtValue() == 0) {
10576 SDValue Ops[] = { SubVec2, SubVec };
10577 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
10584 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
10585 SubVecVT.is128BitVector())
10586 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
10588 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
10589 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
10594 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
10595 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
10596 // one of the above-mentioned nodes. It has to be wrapped because otherwise
10597 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
10598 // be used to form an addressing mode. These wrapped nodes will be selected
10599 // into MOV32ri.
10601 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
10602 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
10604 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
10605 // global base reg.
10606 unsigned char OpFlag = 0;
10607 unsigned WrapperKind = X86ISD::Wrapper;
10608 CodeModel::Model M = DAG.getTarget().getCodeModel();
10610 if (Subtarget->isPICStyleRIPRel() &&
10611 (M == CodeModel::Small || M == CodeModel::Kernel))
10612 WrapperKind = X86ISD::WrapperRIP;
10613 else if (Subtarget->isPICStyleGOT())
10614 OpFlag = X86II::MO_GOTOFF;
10615 else if (Subtarget->isPICStyleStubPIC())
10616 OpFlag = X86II::MO_PIC_BASE_OFFSET;
10618 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
10619 CP->getAlignment(),
10620 CP->getOffset(), OpFlag);
10622 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
10623 // With PIC, the address is actually $g + Offset.
10625 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
10626 DAG.getNode(X86ISD::GlobalBaseReg,
10627 SDLoc(), getPointerTy()),
10634 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
10635 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
10637 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
10638 // global base reg.
10639 unsigned char OpFlag = 0;
10640 unsigned WrapperKind = X86ISD::Wrapper;
10641 CodeModel::Model M = DAG.getTarget().getCodeModel();
10643 if (Subtarget->isPICStyleRIPRel() &&
10644 (M == CodeModel::Small || M == CodeModel::Kernel))
10645 WrapperKind = X86ISD::WrapperRIP;
10646 else if (Subtarget->isPICStyleGOT())
10647 OpFlag = X86II::MO_GOTOFF;
10648 else if (Subtarget->isPICStyleStubPIC())
10649 OpFlag = X86II::MO_PIC_BASE_OFFSET;
10651 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
10654 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
10656 // With PIC, the address is actually $g + Offset.
10658 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
10659 DAG.getNode(X86ISD::GlobalBaseReg,
10660 SDLoc(), getPointerTy()),
10667 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
10668 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
10670 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
10671 // global base reg.
10672 unsigned char OpFlag = 0;
10673 unsigned WrapperKind = X86ISD::Wrapper;
10674 CodeModel::Model M = DAG.getTarget().getCodeModel();
10676 if (Subtarget->isPICStyleRIPRel() &&
10677 (M == CodeModel::Small || M == CodeModel::Kernel)) {
10678 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
10679 OpFlag = X86II::MO_GOTPCREL;
10680 WrapperKind = X86ISD::WrapperRIP;
10681 } else if (Subtarget->isPICStyleGOT()) {
10682 OpFlag = X86II::MO_GOT;
10683 } else if (Subtarget->isPICStyleStubPIC()) {
10684 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
10685 } else if (Subtarget->isPICStyleStubNoDynamic()) {
10686 OpFlag = X86II::MO_DARWIN_NONLAZY;
10689 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
10692 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
10694 // With PIC, the address is actually $g + Offset.
10695 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
10696 !Subtarget->is64Bit()) {
10697 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
10698 DAG.getNode(X86ISD::GlobalBaseReg,
10699 SDLoc(), getPointerTy()),
10703 // For symbols that require a load from a stub to get the address, emit the
10704 // load.
10705 if (isGlobalStubReference(OpFlag))
10706 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
10707 MachinePointerInfo::getGOT(), false, false, false, 0);
10713 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
10714 // Create the TargetBlockAddress node.
10715 unsigned char OpFlags =
10716 Subtarget->ClassifyBlockAddressReference();
10717 CodeModel::Model M = DAG.getTarget().getCodeModel();
10718 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
10719 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
10721 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
10724 if (Subtarget->isPICStyleRIPRel() &&
10725 (M == CodeModel::Small || M == CodeModel::Kernel))
10726 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
10728 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
10730 // With PIC, the address is actually $g + Offset.
10731 if (isGlobalRelativeToPICBase(OpFlags)) {
10732 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
10733 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
10741 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
10742 int64_t Offset, SelectionDAG &DAG) const {
10743 // Create the TargetGlobalAddress node, folding in the constant
10744 // offset if it is legal.
10745 unsigned char OpFlags =
10746 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
10747 CodeModel::Model M = DAG.getTarget().getCodeModel();
10749 if (OpFlags == X86II::MO_NO_FLAG &&
10750 X86::isOffsetSuitableForCodeModel(Offset, M)) {
10751 // A direct static reference to a global.
10752 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
10755 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
10758 if (Subtarget->isPICStyleRIPRel() &&
10759 (M == CodeModel::Small || M == CodeModel::Kernel))
10760 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
10762 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
10764 // With PIC, the address is actually $g + Offset.
10765 if (isGlobalRelativeToPICBase(OpFlags)) {
10766 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
10767 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
10771 // For globals that require a load from a stub to get the address, emit the
10772 // load.
10773 if (isGlobalStubReference(OpFlags))
10774 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
10775 MachinePointerInfo::getGOT(), false, false, false, 0);
10777 // If there was a non-zero offset that we didn't fold, create an explicit
10778 // addition for it.
10780 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
10781 DAG.getConstant(Offset, getPointerTy()));
10787 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
10788 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
10789 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
10790 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
10794 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
10795 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
10796 unsigned char OperandFlags, bool LocalDynamic = false) {
10797 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
10798 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10800 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
10801 GA->getValueType(0),
10805 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
10809 SDValue Ops[] = { Chain, TGA, *InFlag };
10810 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
10812 SDValue Ops[] = { Chain, TGA };
10813 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
10816 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
10817 MFI->setAdjustsStack(true);
10818 MFI->setHasCalls(true);
10820 SDValue Flag = Chain.getValue(1);
10821 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
10824 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
10826 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
10829 SDLoc dl(GA); // ? function entry point might be better
10830 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
10831 DAG.getNode(X86ISD::GlobalBaseReg,
10832 SDLoc(), PtrVT), InFlag);
10833 InFlag = Chain.getValue(1);
10835 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
10838 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
10840 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
10842 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
10843 X86::RAX, X86II::MO_TLSGD);
10846 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
10852 // Get the start address of the TLS block for this module.
10853 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
10854 .getInfo<X86MachineFunctionInfo>();
10855 MFI->incNumLocalDynamicTLSAccesses();
10859 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
10860 X86II::MO_TLSLD, /*LocalDynamic=*/true);
10863 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
10864 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
10865 InFlag = Chain.getValue(1);
10866 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
10867 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
10870 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
10871 // of Base.
10874 unsigned char OperandFlags = X86II::MO_DTPOFF;
10875 unsigned WrapperKind = X86ISD::Wrapper;
10876 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
10877 GA->getValueType(0),
10878 GA->getOffset(), OperandFlags);
10879 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
10881 // Add x@dtpoff with the base.
10882 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
10885 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
10886 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
10887 const EVT PtrVT, TLSModel::Model model,
10888 bool is64Bit, bool isPIC) {
10891 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
10892 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
10893 is64Bit ? 257 : 256));
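// (Address spaces 256 and 257 are the x86 backend's conventions for the %gs
// and %fs segments respectively, so this "null" pointer really names the
// segment base, i.e. the thread pointer.)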
10895 SDValue ThreadPointer =
10896 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
10897 MachinePointerInfo(Ptr), false, false, false, 0);
10899 unsigned char OperandFlags = 0;
10900 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
10901 // the initial exec model on x86-64, where the GOT entry is addressed RIP-relatively.
10902 unsigned WrapperKind = X86ISD::Wrapper;
10903 if (model == TLSModel::LocalExec) {
10904 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
10905 } else if (model == TLSModel::InitialExec) {
10906 if (is64Bit) {
10907 OperandFlags = X86II::MO_GOTTPOFF;
10908 WrapperKind = X86ISD::WrapperRIP;
10909 } else {
10910 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
10911 }
10912 } else {
10913 llvm_unreachable("Unexpected model");
10914 }
10916 // emit "addl x@ntpoff,%eax" (local exec)
10917 // or "addl x@indntpoff,%eax" (initial exec)
10918 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
10920 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
10921 GA->getOffset(), OperandFlags);
10922 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
10924 if (model == TLSModel::InitialExec) {
10925 if (isPIC && !is64Bit) {
10926 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
10927 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
10931 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
10932 MachinePointerInfo::getGOT(), false, false, false, 0);
10935 // The address of the thread local variable is the sum of the thread
10936 // pointer and the offset of the variable.
10937 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
10941 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
10943 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
10944 const GlobalValue *GV = GA->getGlobal();
10946 if (Subtarget->isTargetELF()) {
10947 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
10950 case TLSModel::GeneralDynamic:
10951 if (Subtarget->is64Bit())
10952 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
10953 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
10954 case TLSModel::LocalDynamic:
10955 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
10956 Subtarget->is64Bit());
10957 case TLSModel::InitialExec:
10958 case TLSModel::LocalExec:
10959 return LowerToTLSExecModel(
10960 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
10961 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
10963 llvm_unreachable("Unknown TLS model.");
10966 if (Subtarget->isTargetDarwin()) {
10967 // Darwin only has one model of TLS. Lower to that.
10968 unsigned char OpFlag = 0;
10969 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
10970 X86ISD::WrapperRIP : X86ISD::Wrapper;
10972 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
10973 // global base reg.
10974 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
10975 !Subtarget->is64Bit();
10977 OpFlag = X86II::MO_TLVP_PIC_BASE;
10979 OpFlag = X86II::MO_TLVP;
10981 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
10982 GA->getValueType(0),
10983 GA->getOffset(), OpFlag);
10984 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
10986 // With PIC32, the address is actually $g + Offset.
10988 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
10989 DAG.getNode(X86ISD::GlobalBaseReg,
10990 SDLoc(), getPointerTy()),
10993 // Lowering the machine ISD node will make sure everything ends up in the
10994 // right location.
10995 SDValue Chain = DAG.getEntryNode();
10996 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10997 SDValue Args[] = { Chain, Offset };
10998 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
11000 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
11001 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
11002 MFI->setAdjustsStack(true);
11004 // And our return value (tls address) is in the standard call return value
11005 // register.
11006 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
11007 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
11008 Chain.getValue(1));
11011 if (Subtarget->isTargetKnownWindowsMSVC() ||
11012 Subtarget->isTargetWindowsGNU()) {
11013 // Just use the implicit TLS architecture
11014 // Need to generate something similar to:
11015 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
11017 // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
11018 // mov rcx, qword [rdx+rcx*8]
11019 // mov eax, .tls$:tlsvar
11020 // [rax+rcx] contains the address
11021 // Windows 64bit: gs:0x58
11022 // Windows 32bit: fs:__tls_array
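// Roughly, in C terms (illustrative sketch only; the names are assumptions,
// not real runtime definitions):
//   char **slots = *(char ***)(teb_base + slot_offset); // gs:0x58 / fs:__tls_array
//   char *tls_base = slots[_tls_index];
//   return tls_base + secrel_offset_of_var;             // .tls section-relative offset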
11025 SDValue Chain = DAG.getEntryNode();
11027 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
11028 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
11029 // use its literal value of 0x2C.
11030 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
11031 ? Type::getInt8PtrTy(*DAG.getContext(),
11033 : Type::getInt32PtrTy(*DAG.getContext(),
11037 Subtarget->is64Bit()
11038 ? DAG.getIntPtrConstant(0x58)
11039 : (Subtarget->isTargetWindowsGNU()
11040 ? DAG.getIntPtrConstant(0x2C)
11041 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
11043 SDValue ThreadPointer =
11044 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
11045 MachinePointerInfo(Ptr), false, false, false, 0);
11047 // Load the _tls_index variable
11048 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
11049 if (Subtarget->is64Bit())
11050 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
11051 IDX, MachinePointerInfo(), MVT::i32,
11052 false, false, false, 0);
11054 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
11055 false, false, false, 0);
11057 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
11059 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
11061 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
11062 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
11063 false, false, false, 0);
11065 // Get the offset of the start of the .tls section.
11066 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
11067 GA->getValueType(0),
11068 GA->getOffset(), X86II::MO_SECREL);
11069 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
11071 // The address of the thread local variable is the sum of the thread
11072 // pointer and the offset of the variable.
11073 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
11076 llvm_unreachable("TLS not implemented for this target.");
11079 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
11080 /// and take a 2 x i32 value to shift plus a shift amount.
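/// A rough scalar sketch of SHL_PARTS for a 64-bit value held as {Lo, Hi}
/// (illustration only, glossing over the Amt == 0 corner case):
///   if (Amt < 32) { Hi = (Hi << Amt) | (Lo >> (32 - Amt)); Lo <<= Amt; }
///   else          { Hi = Lo << (Amt - 32);                 Lo  = 0;    }
/// SRL/SRA_PARTS are the mirror image on the low half.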
11081 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
11082 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
11083 MVT VT = Op.getSimpleValueType();
11084 unsigned VTBits = VT.getSizeInBits();
11086 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
11087 SDValue ShOpLo = Op.getOperand(0);
11088 SDValue ShOpHi = Op.getOperand(1);
11089 SDValue ShAmt = Op.getOperand(2);
11090 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
11091 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away
11092 // during isel.
11093 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
11094 DAG.getConstant(VTBits - 1, MVT::i8));
11095 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
11096 DAG.getConstant(VTBits - 1, MVT::i8))
11097 : DAG.getConstant(0, VT);
11099 SDValue Tmp2, Tmp3;
11100 if (Op.getOpcode() == ISD::SHL_PARTS) {
11101 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
11102 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
11104 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
11105 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
11108 // If the shift amount is greater than or equal to the width of a part, we can't
11109 // rely on the results of shld/shrd. Insert a test and select the appropriate
11110 // values for large shift amounts.
11111 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
11112 DAG.getConstant(VTBits, MVT::i8));
11113 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
11114 AndNode, DAG.getConstant(0, MVT::i8));
11117 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
11118 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
11119 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
11121 if (Op.getOpcode() == ISD::SHL_PARTS) {
11122 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
11123 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
11125 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
11126 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
11129 SDValue Ops[2] = { Lo, Hi };
11130 return DAG.getMergeValues(Ops, dl);
11133 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
11134 SelectionDAG &DAG) const {
11135 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
11138 if (SrcVT.isVector()) {
11139 if (SrcVT.getVectorElementType() == MVT::i1) {
11140 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
11141 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
11142 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
11143 Op.getOperand(0)));
11148 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
11149 "Unknown SINT_TO_FP to lower!");
11151 // These are really Legal; return the operand so the caller accepts it as
11152 // Legal.
11153 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
11155 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
11156 Subtarget->is64Bit()) {
11160 unsigned Size = SrcVT.getSizeInBits()/8;
11161 MachineFunction &MF = DAG.getMachineFunction();
11162 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
11163 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
11164 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
11166 MachinePointerInfo::getFixedStack(SSFI),
11168 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
11171 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
11173 SelectionDAG &DAG) const {
11177 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
11179 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
11181 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
11183 unsigned ByteSize = SrcVT.getSizeInBits()/8;
11185 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
11186 MachineMemOperand *MMO;
11188 int SSFI = FI->getIndex();
11190 DAG.getMachineFunction()
11191 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
11192 MachineMemOperand::MOLoad, ByteSize, ByteSize);
11194 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
11195 StackSlot = StackSlot.getOperand(1);
11197 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
11198 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
11200 Tys, Ops, SrcVT, MMO);
11203 Chain = Result.getValue(1);
11204 SDValue InFlag = Result.getValue(2);
11206 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
11207 // shouldn't be necessary except that RFP cannot be live across
11208 // multiple blocks. When stackifier is fixed, they can be uncoupled.
11209 MachineFunction &MF = DAG.getMachineFunction();
11210 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
11211 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
11212 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
11213 Tys = DAG.getVTList(MVT::Other);
11215 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
11217 MachineMemOperand *MMO =
11218 DAG.getMachineFunction()
11219 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
11220 MachineMemOperand::MOStore, SSFISize, SSFISize);
11222 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
11223 Ops, Op.getValueType(), MMO);
11224 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
11225 MachinePointerInfo::getFixedStack(SSFI),
11226 false, false, false, 0);
11232 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
11233 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
11234 SelectionDAG &DAG) const {
11235 // This algorithm is not obvious. Here is what we're trying to output:
11236 /*
11238 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
11239 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
11241 haddpd %xmm0, %xmm0 // with SSE3; otherwise:
11243 pshufd $0x4e, %xmm0, %xmm1
11244 addpd %xmm1, %xmm0
11246 */
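// For intuition, the same trick in scalar C (illustrative sketch only,
// assuming little-endian IEEE doubles):
//   uint64_t lo = 0x4330000000000000ULL | (x & 0xffffffffULL); // 2^52 + lo32
//   uint64_t hi = 0x4530000000000000ULL | (x >> 32);           // 2^84 + hi32 * 2^32
//   double dlo, dhi;
//   memcpy(&dlo, &lo, 8); memcpy(&dhi, &hi, 8);
//   double res = (dlo - 0x1.0p52) + (dhi - 0x1.0p84);          // == (double)x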
11249 LLVMContext *Context = DAG.getContext();
11251 // Build some magic constants.
11252 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
11253 Constant *C0 = ConstantDataVector::get(*Context, CV0);
11254 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
11256 SmallVector<Constant*,2> CV1;
11258 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
11259 APInt(64, 0x4330000000000000ULL))));
11261 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
11262 APInt(64, 0x4530000000000000ULL))));
11263 Constant *C1 = ConstantVector::get(CV1);
11264 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
11266 // Load the 64-bit value into an XMM register.
11267 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
11269 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
11270 MachinePointerInfo::getConstantPool(),
11271 false, false, false, 16);
11272 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
11273 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
11276 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
11277 MachinePointerInfo::getConstantPool(),
11278 false, false, false, 16);
11279 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
11280 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
11283 if (Subtarget->hasSSE3()) {
11284 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
11285 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
11287 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
11288 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
11290 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
11291 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
11295 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
11296 DAG.getIntPtrConstant(0));
11299 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
11300 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
11301 SelectionDAG &DAG) const {
11303 // FP constant to bias correct the final result.
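// Illustrative scalar version of the bias trick built below (sketch only):
//   uint64_t bits = 0x4330000000000000ULL | (uint64_t)x; // double value 2^52 + x
//   double d;  memcpy(&d, &bits, 8);
//   double res = d - 0x1.0p52;                            // exact for any uint32_t x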
11304 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
11307 // Load the 32-bit value into an XMM register.
11308 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
11311 // Zero out the upper parts of the register.
11312 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
11314 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11315 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
11316 DAG.getIntPtrConstant(0));
11318 // Or the load with the bias.
11319 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
11320 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
11321 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11322 MVT::v2f64, Load)),
11323 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
11324 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11325 MVT::v2f64, Bias)));
11326 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11327 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
11328 DAG.getIntPtrConstant(0));
11330 // Subtract the bias.
11331 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
11333 // Handle final rounding.
11334 EVT DestVT = Op.getValueType();
11336 if (DestVT.bitsLT(MVT::f64))
11337 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
11338 DAG.getIntPtrConstant(0));
11339 if (DestVT.bitsGT(MVT::f64))
11340 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
11342 // Handle final rounding.
11346 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
11347 const X86Subtarget &Subtarget) {
11348 // The algorithm is the following:
11349 // #ifdef __SSE4_1__
11350 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
11351 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
11352 // (uint4) 0x53000000, 0xaa);
11353 // #else
11354 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
11355 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
11356 // #endif
11357 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
11358 // return (float4) lo + fhi;
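// Why this works (illustrative, one lane): 0x4b000000 | (v & 0xffff) is the
// float 2^23 + lo16, and 0x53000000 | (v >> 16) is the float 2^39 + hi16 * 2^16,
// both exact. Subtracting (2^39 + 2^23) from the latter leaves hi16 * 2^16 - 2^23,
// and adding the former gives lo16 + hi16 * 2^16 == v, with a single rounding
// at the final add.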
11361 SDValue V = Op->getOperand(0);
11362 EVT VecIntVT = V.getValueType();
11363 bool Is128 = VecIntVT == MVT::v4i32;
11364 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
11365 // If we convert to something other than the supported type, e.g., to v4f64,
11366 // abort early.
11367 if (VecFloatVT != Op->getValueType(0))
11370 unsigned NumElts = VecIntVT.getVectorNumElements();
11371 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
11372 "Unsupported custom type");
11373 assert(NumElts <= 8 && "The size of the constant array must be fixed");
11375 // In the #ifdef/#else code, we have in common:
11376 // - the splat constants 0x4b000000 and 0x53000000, and the shift v >> 16.
11382 // Create the splat vector for 0x4b000000.
11383 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
11384 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
11385 CstLow, CstLow, CstLow, CstLow};
11386 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
11387 makeArrayRef(&CstLowArray[0], NumElts));
11388 // Create the splat vector for 0x53000000.
11389 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
11390 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
11391 CstHigh, CstHigh, CstHigh, CstHigh};
11392 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
11393 makeArrayRef(&CstHighArray[0], NumElts));
11395 // Create the right shift.
11396 SDValue CstShift = DAG.getConstant(16, MVT::i32);
11397 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
11398 CstShift, CstShift, CstShift, CstShift};
11399 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
11400 makeArrayRef(&CstShiftArray[0], NumElts));
11401 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
11404 if (Subtarget.hasSSE41()) {
11405 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
11406 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
11407 SDValue VecCstLowBitcast =
11408 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
11409 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
11410 // Low will be bitcasted right away, so do not bother bitcasting back to its
11411 // original type.
11412 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
11413 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
11414 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
11415 // (uint4) 0x53000000, 0xaa);
11416 SDValue VecCstHighBitcast =
11417 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
11418 SDValue VecShiftBitcast =
11419 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
11420 // High will be bitcasted right away, so do not bother bitcasting back to
11421 // its original type.
11422 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
11423 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
11425 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
11426 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
11427 CstMask, CstMask, CstMask);
11428 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
11429 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
11430 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
11432 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
11433 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
11436 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
11437 SDValue CstFAdd = DAG.getConstantFP(
11438 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
11439 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
11440 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
11441 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
11442 makeArrayRef(&CstFAddArray[0], NumElts));
11444 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
11445 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
11447 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
11448 // return (float4) lo + fhi;
11449 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
11450 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
11453 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
11454 SelectionDAG &DAG) const {
11455 SDValue N0 = Op.getOperand(0);
11456 MVT SVT = N0.getSimpleValueType();
11459 switch (SVT.SimpleTy) {
11461 llvm_unreachable("Custom UINT_TO_FP is not supported!");
11466 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
11467 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
11468 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
11472 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
11474 llvm_unreachable(nullptr);
11477 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
11478 SelectionDAG &DAG) const {
11479 SDValue N0 = Op.getOperand(0);
11482 if (Op.getValueType().isVector())
11483 return lowerUINT_TO_FP_vec(Op, DAG);
11485 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
11486 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
11487 // the optimization here.
11488 if (DAG.SignBitIsZero(N0))
11489 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
11491 MVT SrcVT = N0.getSimpleValueType();
11492 MVT DstVT = Op.getSimpleValueType();
11493 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
11494 return LowerUINT_TO_FP_i64(Op, DAG);
11495 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
11496 return LowerUINT_TO_FP_i32(Op, DAG);
11497 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
11500 // Make a 64-bit buffer, and use it to build an FILD.
11501 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
11502 if (SrcVT == MVT::i32) {
11503 SDValue WordOff = DAG.getConstant(4, getPointerTy());
11504 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
11505 getPointerTy(), StackSlot, WordOff);
11506 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
11507 StackSlot, MachinePointerInfo(),
11509 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
11510 OffsetSlot, MachinePointerInfo(),
11512 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
11516 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
11517 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
11518 StackSlot, MachinePointerInfo(),
11520 // For i64 source, we need to add the appropriate power of 2 if the input
11521 // was negative. This is the same as the optimization in
11522 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
11523 // we must be careful to do the computation in x87 extended precision, not
11524 // in SSE. (The generic code can't know it's OK to do this, or how to.)
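// Scalar sketch of the fixup (illustrative only):
//   long double d = (long double)(int64_t)x;  // what FILD produces
//   if ((int64_t)x < 0) d += 0x1.0p64L;       // 2^64 == the 0x5F800000 fudge below
//   return (DstVT)d;                          // one rounding from x87 to DstVT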
11525 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
11526 MachineMemOperand *MMO =
11527 DAG.getMachineFunction()
11528 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
11529 MachineMemOperand::MOLoad, 8, 8);
11531 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
11532 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
11533 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
11536 APInt FF(32, 0x5F800000ULL);
11538 // Check whether the sign bit is set.
11539 SDValue SignSet = DAG.getSetCC(dl,
11540 getSetCCResultType(*DAG.getContext(), MVT::i64),
11541 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
11544 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
11545 SDValue FudgePtr = DAG.getConstantPool(
11546 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
11549 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
11550 SDValue Zero = DAG.getIntPtrConstant(0);
11551 SDValue Four = DAG.getIntPtrConstant(4);
11552 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
11554 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
11556 // Load the value out, extending it from f32 to f80.
11557 // FIXME: Avoid the extend by constructing the right constant pool?
11558 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
11559 FudgePtr, MachinePointerInfo::getConstantPool(),
11560 MVT::f32, false, false, false, 4);
11561 // Extend everything to 80 bits to force it to be done on x87.
11562 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
11563 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
11566 std::pair<SDValue,SDValue>
11567 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
11568 bool IsSigned, bool IsReplace) const {
11571 EVT DstTy = Op.getValueType();
11573 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
11574 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
11578 assert(DstTy.getSimpleVT() <= MVT::i64 &&
11579 DstTy.getSimpleVT() >= MVT::i16 &&
11580 "Unknown FP_TO_INT to lower!");
11582 // These are really Legal.
11583 if (DstTy == MVT::i32 &&
11584 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
11585 return std::make_pair(SDValue(), SDValue());
11586 if (Subtarget->is64Bit() &&
11587 DstTy == MVT::i64 &&
11588 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
11589 return std::make_pair(SDValue(), SDValue());
11591 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
11592 // stack slot, or into the FTOL runtime function.
11593 MachineFunction &MF = DAG.getMachineFunction();
11594 unsigned MemSize = DstTy.getSizeInBits()/8;
11595 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
11596 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
11599 if (!IsSigned && isIntegerTypeFTOL(DstTy))
11600 Opc = X86ISD::WIN_FTOL;
11602 switch (DstTy.getSimpleVT().SimpleTy) {
11603 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
11604 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
11605 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
11606 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
11609 SDValue Chain = DAG.getEntryNode();
11610 SDValue Value = Op.getOperand(0);
11611 EVT TheVT = Op.getOperand(0).getValueType();
11612 // FIXME This causes a redundant load/store if the SSE-class value is already
11613 // in memory, such as if it is on the callstack.
11614 if (isScalarFPTypeInSSEReg(TheVT)) {
11615 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
11616 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
11617 MachinePointerInfo::getFixedStack(SSFI),
11619 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
11621 Chain, StackSlot, DAG.getValueType(TheVT)
11624 MachineMemOperand *MMO =
11625 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
11626 MachineMemOperand::MOLoad, MemSize, MemSize);
11627 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
11628 Chain = Value.getValue(1);
11629 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
11630 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
11633 MachineMemOperand *MMO =
11634 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
11635 MachineMemOperand::MOStore, MemSize, MemSize);
11637 if (Opc != X86ISD::WIN_FTOL) {
11638 // Build the FP_TO_INT*_IN_MEM
11639 SDValue Ops[] = { Chain, Value, StackSlot };
11640 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
11642 return std::make_pair(FIST, StackSlot);
11644 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
11645 DAG.getVTList(MVT::Other, MVT::Glue),
11647 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
11648 MVT::i32, ftol.getValue(1));
11649 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
11650 MVT::i32, eax.getValue(2));
11651 SDValue Ops[] = { eax, edx };
11652 SDValue pair = IsReplace
11653 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
11654 : DAG.getMergeValues(Ops, DL);
11655 return std::make_pair(pair, SDValue());
11659 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
11660 const X86Subtarget *Subtarget) {
11661 MVT VT = Op->getSimpleValueType(0);
11662 SDValue In = Op->getOperand(0);
11663 MVT InVT = In.getSimpleValueType();
11666 // Optimize vectors in AVX mode:
11667 //
11668 // v8i16 -> v8i32
11669 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
11670 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
11671 // Concat upper and lower parts.
11672 //
11673 // v4i32 -> v4i64
11674 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
11675 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
11676 // Concat upper and lower parts.
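// e.g. (illustrative) zero-extending v8i16 x = <x0,...,x7>:
//   unpcklwd(x, 0) = <x0,0, x1,0, x2,0, x3,0>, i.e. v4i32 <x0,x1,x2,x3>
//   unpckhwd(x, 0) = <x4,0, x5,0, x6,0, x7,0>, i.e. v4i32 <x4,x5,x6,x7>
//   concatenating the two halves yields the zero-extended v8i32 result.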
11679 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
11680 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
11681 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
11684 if (Subtarget->hasInt256())
11685 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
11687 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
11688 SDValue Undef = DAG.getUNDEF(InVT);
11689 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
11690 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
11691 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
11693 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
11694 VT.getVectorNumElements()/2);
11696 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
11697 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
11699 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
11702 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
11703 SelectionDAG &DAG) {
11704 MVT VT = Op->getSimpleValueType(0);
11705 SDValue In = Op->getOperand(0);
11706 MVT InVT = In.getSimpleValueType();
11708 unsigned int NumElts = VT.getVectorNumElements();
11709 if (NumElts != 8 && NumElts != 16)
11712 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
11713 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
11715 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
11716 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11717 // Now only the mask-extension case (i1 elements) is left.
11718 assert(InVT.getVectorElementType() == MVT::i1);
11719 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
11720 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
11721 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
11722 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
11723 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
11724 MachinePointerInfo::getConstantPool(),
11725 false, false, false, Alignment);
11727 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
11728 if (VT.is512BitVector())
11730 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
11733 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
11734 SelectionDAG &DAG) {
11735 if (Subtarget->hasFp256()) {
11736 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
11744 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
11745 SelectionDAG &DAG) {
11747 MVT VT = Op.getSimpleValueType();
11748 SDValue In = Op.getOperand(0);
11749 MVT SVT = In.getSimpleValueType();
11751 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
11752 return LowerZERO_EXTEND_AVX512(Op, DAG);
11754 if (Subtarget->hasFp256()) {
11755 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
11760 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
11761 VT.getVectorNumElements() != SVT.getVectorNumElements());
11765 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
11767 MVT VT = Op.getSimpleValueType();
11768 SDValue In = Op.getOperand(0);
11769 MVT InVT = In.getSimpleValueType();
11771 if (VT == MVT::i1) {
11772 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
11773 "Invalid scalar TRUNCATE operation");
11774 if (InVT.getSizeInBits() >= 32)
11776 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
11777 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
11779 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
11780 "Invalid TRUNCATE operation");
11782 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
11783 if (VT.getVectorElementType().getSizeInBits() >=8)
11784 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
11786 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
11787 unsigned NumElts = InVT.getVectorNumElements();
11788 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
11789 if (InVT.getSizeInBits() < 512) {
11790 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
11791 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
11795 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
11796 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
11797 SDValue CP = DAG.getConstantPool(C, getPointerTy());
11798 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
11799 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
11800 MachinePointerInfo::getConstantPool(),
11801 false, false, false, Alignment);
11802 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
11803 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
11804 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
11807 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
11808 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
11809 if (Subtarget->hasInt256()) {
11810 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
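// Viewed as v8i32 (little endian), a v4i64 <a,b,c,d> reads as
// <a.lo,a.hi, b.lo,b.hi, c.lo,c.hi, d.lo,d.hi>, so picking elements 0,2,4,6
// keeps exactly the truncated low 32 bits of each 64-bit lane.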
11811 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
11812 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
11814 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
11815 DAG.getIntPtrConstant(0));
11818 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
11819 DAG.getIntPtrConstant(0));
11820 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
11821 DAG.getIntPtrConstant(2));
11822 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
11823 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
11824 static const int ShufMask[] = {0, 2, 4, 6};
11825 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
11828 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
11829 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
11830 if (Subtarget->hasInt256()) {
11831 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
11833 SmallVector<SDValue,32> pshufbMask;
11834 for (unsigned i = 0; i < 2; ++i) {
11835 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
11836 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
11837 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
11838 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
11839 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
11840 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
11841 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
11842 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
11843 for (unsigned j = 0; j < 8; ++j)
11844 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
11846 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
11847 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
11848 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
11850 static const int ShufMask[] = {0, 2, -1, -1};
11851 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
11853 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
11854 DAG.getIntPtrConstant(0));
11855 return DAG.getNode(ISD::BITCAST, DL, VT, In);
11858 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
11859 DAG.getIntPtrConstant(0));
11861 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
11862 DAG.getIntPtrConstant(4));
11864 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
11865 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
11867 // The PSHUFB mask:
11868 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
11869 -1, -1, -1, -1, -1, -1, -1, -1};
11871 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
11872 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
11873 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
11875 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
11876 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
11878 // The MOVLHPS Mask:
11879 static const int ShufMask2[] = {0, 1, 4, 5};
11880 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
11881 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
11884 // Handle truncation of V256 to V128 using shuffles.
11885 if (!VT.is128BitVector() || !InVT.is256BitVector())
11888 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
11890 unsigned NumElems = VT.getVectorNumElements();
11891 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
11893 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
11894 // Prepare truncation shuffle mask
11895 for (unsigned i = 0; i != NumElems; ++i)
11896 MaskVec[i] = i * 2;
11897 SDValue V = DAG.getVectorShuffle(NVT, DL,
11898 DAG.getNode(ISD::BITCAST, DL, NVT, In),
11899 DAG.getUNDEF(NVT), &MaskVec[0]);
11900 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
11901 DAG.getIntPtrConstant(0));
11904 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
11905 SelectionDAG &DAG) const {
11906 assert(!Op.getSimpleValueType().isVector());
11908 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
11909 /*IsSigned=*/ true, /*IsReplace=*/ false);
11910 SDValue FIST = Vals.first, StackSlot = Vals.second;
11911 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
11912 if (!FIST.getNode()) return Op;
11914 if (StackSlot.getNode())
11915 // Load the result.
11916 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
11917 FIST, StackSlot, MachinePointerInfo(),
11918 false, false, false, 0);
11920 // The node is the result.
11924 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
11925 SelectionDAG &DAG) const {
11926 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
11927 /*IsSigned=*/ false, /*IsReplace=*/ false);
11928 SDValue FIST = Vals.first, StackSlot = Vals.second;
11929 assert(FIST.getNode() && "Unexpected failure");
11931 if (StackSlot.getNode())
11932 // Load the result.
11933 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
11934 FIST, StackSlot, MachinePointerInfo(),
11935 false, false, false, 0);
11937 // The node is the result.
11941 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
11943 MVT VT = Op.getSimpleValueType();
11944 SDValue In = Op.getOperand(0);
11945 MVT SVT = In.getSimpleValueType();
11947 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
11949 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
11950 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
11951 In, DAG.getUNDEF(SVT)));
11954 /// The only differences between FABS and FNEG are the mask and the logic op.
11955 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
11956 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
11957 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
11958 "Wrong opcode for lowering FABS or FNEG.");
11960 bool IsFABS = (Op.getOpcode() == ISD::FABS);
11962 // If this is a FABS and it has an FNEG user, bail out to fold the combination
11963 // into an FNABS. We'll lower the FABS after that if it is still in use.
11965 for (SDNode *User : Op->uses())
11966 if (User->getOpcode() == ISD::FNEG)
11969 SDValue Op0 = Op.getOperand(0);
11970 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
11973 MVT VT = Op.getSimpleValueType();
11974 // Assume scalar op for initialization; update for vector if needed.
11975 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
11976 // generate a 16-byte vector constant and logic op even for the scalar case.
11977 // Using a 16-byte mask allows folding the load of the mask with
11978 // the logic op, so it can save (~4 bytes) on code size.
11980 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
11981 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
11982 // decide if we should generate a 16-byte constant mask when we only need 4 or
11983 // 8 bytes for the scalar case.
11984 if (VT.isVector()) {
11985 EltVT = VT.getVectorElementType();
11986 NumElts = VT.getVectorNumElements();
11989 unsigned EltBits = EltVT.getSizeInBits();
11990 LLVMContext *Context = DAG.getContext();
11991 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
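// Scalar view of the three cases (illustrative, f32 shown):
//   fabs(x):                  bits(x) & 0x7fffffff
//   fneg(x):                  bits(x) ^ 0x80000000
//   fnabs(x) = fneg(fabs(x)): bits(x) | 0x80000000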
11993 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
11994 Constant *C = ConstantInt::get(*Context, MaskElt);
11995 C = ConstantVector::getSplat(NumElts, C);
11996 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11997 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
11998 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
11999 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
12000 MachinePointerInfo::getConstantPool(),
12001 false, false, false, Alignment);
12003 if (VT.isVector()) {
12004 // For a vector, cast operands to a vector type, perform the logic op,
12005 // and cast the result back to the original value type.
12006 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
12007 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
12008 SDValue Operand = IsFNABS ?
12009 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
12010 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
12011 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
12012 return DAG.getNode(ISD::BITCAST, dl, VT,
12013 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
12016 // If not vector, then scalar.
12017 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
12018 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
12019 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
12022 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
12023 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12024 LLVMContext *Context = DAG.getContext();
12025 SDValue Op0 = Op.getOperand(0);
12026 SDValue Op1 = Op.getOperand(1);
12028 MVT VT = Op.getSimpleValueType();
12029 MVT SrcVT = Op1.getSimpleValueType();
12031 // If second operand is smaller, extend it first.
12032 if (SrcVT.bitsLT(VT)) {
12033 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
12036 // And if it is bigger, shrink it first.
12037 if (SrcVT.bitsGT(VT)) {
12038 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
12042 // At this point the operands and the result should have the same
12043 // type, and that won't be f80 since that is not custom lowered.
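// In scalar terms, what follows computes (illustrative only):
//   copysign(mag, sgn) = (bits(mag) & ~SIGN_BIT) | (bits(sgn) & SIGN_BIT)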
12045 const fltSemantics &Sem =
12046 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
12047 const unsigned SizeInBits = VT.getSizeInBits();
12049 SmallVector<Constant *, 4> CV(
12050 VT == MVT::f64 ? 2 : 4,
12051 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
12053 // First, clear all bits but the sign bit from the second operand (sign).
12054 CV[0] = ConstantFP::get(*Context,
12055 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
12056 Constant *C = ConstantVector::get(CV);
12057 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
12058 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
12059 MachinePointerInfo::getConstantPool(),
12060 false, false, false, 16);
12061 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
12063 // Next, clear the sign bit from the first operand (magnitude).
12064 // If it's a constant, we can clear it here.
12065 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
12066 APFloat APF = Op0CN->getValueAPF();
12067 // If the magnitude is a positive zero, the sign bit alone is enough.
12068 if (APF.isPosZero())
12071 CV[0] = ConstantFP::get(*Context, APF);
12073 CV[0] = ConstantFP::get(
12075 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
12077 C = ConstantVector::get(CV);
12078 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
12079 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
12080 MachinePointerInfo::getConstantPool(),
12081 false, false, false, 16);
12082 // If the magnitude operand wasn't a constant, we need to AND out the sign.
12083 if (!isa<ConstantFPSDNode>(Op0))
12084 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
12086 // OR the magnitude value with the sign bit.
12087 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
12090 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
12091 SDValue N0 = Op.getOperand(0);
12093 MVT VT = Op.getSimpleValueType();
12095 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
12096 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
12097 DAG.getConstant(1, VT));
12098 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
12101 // Check whether an OR'd tree is PTEST-able.
12102 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
12103 SelectionDAG &DAG) {
12104 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
12106 if (!Subtarget->hasSSE41())
12109 if (!Op->hasOneUse())
12112 SDNode *N = Op.getNode();
12115 SmallVector<SDValue, 8> Opnds;
12116 DenseMap<SDValue, unsigned> VecInMap;
12117 SmallVector<SDValue, 8> VecIns;
12118 EVT VT = MVT::Other;
12120 // Recognize a special case where a vector is cast into a wide integer to
12121 // test all 0s.
12122 Opnds.push_back(N->getOperand(0));
12123 Opnds.push_back(N->getOperand(1));
12125 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
12126 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
12127 // BFS traverse all OR'd operands.
12128 if (I->getOpcode() == ISD::OR) {
12129 Opnds.push_back(I->getOperand(0));
12130 Opnds.push_back(I->getOperand(1));
12131 // Re-evaluate the number of nodes to be traversed.
12132 e += 2; // 2 more nodes (LHS and RHS) are pushed.
12136 // Quit if we find a non-EXTRACT_VECTOR_ELT operand.
12137 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12140 // Quit if the index is not a constant.
12141 SDValue Idx = I->getOperand(1);
12142 if (!isa<ConstantSDNode>(Idx))
12145 SDValue ExtractedFromVec = I->getOperand(0);
12146 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
12147 if (M == VecInMap.end()) {
12148 VT = ExtractedFromVec.getValueType();
12149 // Quit if not 128/256-bit vector.
12150 if (!VT.is128BitVector() && !VT.is256BitVector())
12152 // Quit if not the same type.
12153 if (VecInMap.begin() != VecInMap.end() &&
12154 VT != VecInMap.begin()->first.getValueType())
12156 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
12157 VecIns.push_back(ExtractedFromVec);
12159 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
12162 assert((VT.is128BitVector() || VT.is256BitVector()) &&
12163 "Not extracted from 128-/256-bit vector.");
12165 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
12167 for (DenseMap<SDValue, unsigned>::const_iterator
12168 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
12169 // Quit if not all elements are used.
12170 if (I->second != FullMask)
12174 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
12176 // Cast all vectors into TestVT for PTEST.
12177 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
12178 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
12180 // If more than one full vector is evaluated, OR them together first before PTEST.
12181 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
12182 // Each iteration will OR 2 nodes and append the result until there is only
12183 // 1 node left, i.e. the final OR'd value of all vectors.
12184 SDValue LHS = VecIns[Slot];
12185 SDValue RHS = VecIns[Slot + 1];
12186 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
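// PTEST sets ZF when the AND of its two operands is all zeros, so testing the
// combined OR of all vectors against itself sets ZF iff every element is zero.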
12189 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
12190 VecIns.back(), VecIns.back());
12193 /// \brief Return true if \c Op has a use that doesn't just read flags.
12194 static bool hasNonFlagsUse(SDValue Op) {
12195 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
12197 SDNode *User = *UI;
12198 unsigned UOpNo = UI.getOperandNo();
12199 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
12200 // Look past the truncate.
12201 UOpNo = User->use_begin().getOperandNo();
12202 User = *User->use_begin();
12205 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
12206 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
12212 /// Emit nodes that will be selected as "test Op0,Op0", or something
12213 /// equivalent.
12214 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
12215 SelectionDAG &DAG) const {
12216 if (Op.getValueType() == MVT::i1) {
12217 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
12218 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
12219 DAG.getConstant(0, MVT::i8));
12221 // CF and OF aren't always set the way we want. Determine which
12222 // of these we need.
12223 bool NeedCF = false;
12224 bool NeedOF = false;
12227 case X86::COND_A: case X86::COND_AE:
12228 case X86::COND_B: case X86::COND_BE:
12231 case X86::COND_G: case X86::COND_GE:
12232 case X86::COND_L: case X86::COND_LE:
12233 case X86::COND_O: case X86::COND_NO: {
12234 // Check if we really need to set the
12235 // Overflow flag. If NoSignedWrap is present
12236 // that is not actually needed.
12237 switch (Op->getOpcode()) {
12242 const BinaryWithFlagsSDNode *BinNode =
12243 cast<BinaryWithFlagsSDNode>(Op.getNode());
12244 if (BinNode->hasNoSignedWrap())
12254 // See if we can use the EFLAGS value from the operand instead of
12255 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
12256 // we prove that the arithmetic won't overflow, we can't use OF or CF.
12257 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
12258 // Emit a CMP with 0, which is the TEST pattern.
12259 //if (Op.getValueType() == MVT::i1)
12260 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
12261 // DAG.getConstant(0, MVT::i1));
12262 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
12263 DAG.getConstant(0, Op.getValueType()));
12265 unsigned Opcode = 0;
12266 unsigned NumOperands = 0;
12268 // Truncate operations may prevent the merge of the SETCC instruction
12269 // and the arithmetic instruction before it. Attempt to truncate the operands
12270 // of the arithmetic instruction and use a reduced bit-width instruction.
12271 bool NeedTruncation = false;
12272 SDValue ArithOp = Op;
12273 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
12274 SDValue Arith = Op->getOperand(0);
12275 // Both the trunc and the arithmetic op need to have one user each.
12276 if (Arith->hasOneUse())
12277 switch (Arith.getOpcode()) {
12284 NeedTruncation = true;
12290 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
12291 // which may be the result of a CAST. We use the variable 'Op', which is the
12292 // non-casted variable when we check for possible users.
12293 switch (ArithOp.getOpcode()) {
12295 // Due to an isel shortcoming, be conservative if this add is likely to be
12296 // selected as part of a load-modify-store instruction. When the root node
12297 // in a match is a store, isel doesn't know how to remap non-chain non-flag
12298 // uses of other nodes in the match, such as the ADD in this case. This
12299 // leads to the ADD being left around and reselected, with the result being
12300 // two adds in the output. Alas, even if none of our users are stores, that
12301 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
12302 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
12303 // climbing the DAG back to the root, and it doesn't seem to be worth the
12304 // trouble.
12305 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
12306 UE = Op.getNode()->use_end(); UI != UE; ++UI)
12307 if (UI->getOpcode() != ISD::CopyToReg &&
12308 UI->getOpcode() != ISD::SETCC &&
12309 UI->getOpcode() != ISD::STORE)
12312 if (ConstantSDNode *C =
12313 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
12314 // An add of one will be selected as an INC.
12315 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
12316 Opcode = X86ISD::INC;
12321 // An add of negative one (subtract of one) will be selected as a DEC.
12322 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
12323 Opcode = X86ISD::DEC;
12329 // Otherwise use a regular EFLAGS-setting add.
12330 Opcode = X86ISD::ADD;
12335 // If we have a constant logical shift that's only used in a comparison
12336 // against zero turn it into an equivalent AND. This allows turning it into
12337 // a TEST instruction later.
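// For example, with a 32-bit value, (X >> 3) == 0 becomes (X & 0xFFFFFFF8) == 0,
// which can later be selected as a single TEST with an immediate.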
12338 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
12339 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
12340 EVT VT = Op.getValueType();
12341 unsigned BitWidth = VT.getSizeInBits();
12342 unsigned ShAmt = Op->getConstantOperandVal(1);
12343 if (ShAmt >= BitWidth) // Avoid undefined shifts.
12345 APInt Mask = ArithOp.getOpcode() == ISD::SRL
12346 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
12347 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
12348 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
12350 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
12351 DAG.getConstant(Mask, VT));
12352 DAG.ReplaceAllUsesWith(Op, New);
12358 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
12359 // because a TEST instruction will be better.
12360 if (!hasNonFlagsUse(Op))
12366 // Due to the ISEL shortcoming noted above, be conservative if this op is
12367 // likely to be selected as part of a load-modify-store instruction.
12368 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
12369 UE = Op.getNode()->use_end(); UI != UE; ++UI)
12370 if (UI->getOpcode() == ISD::STORE)
12373 // Otherwise use a regular EFLAGS-setting instruction.
12374 switch (ArithOp.getOpcode()) {
12375 default: llvm_unreachable("unexpected operator!");
12376 case ISD::SUB: Opcode = X86ISD::SUB; break;
12377 case ISD::XOR: Opcode = X86ISD::XOR; break;
12378 case ISD::AND: Opcode = X86ISD::AND; break;
12380 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
12381 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
12382 if (EFLAGS.getNode())
12385 Opcode = X86ISD::OR;
12399 return SDValue(Op.getNode(), 1);
12405 // If we found that truncation is beneficial, perform the truncation and
12406 // update the comparison.
12407 if (NeedTruncation) {
12408 EVT VT = Op.getValueType();
12409 SDValue WideVal = Op->getOperand(0);
12410 EVT WideVT = WideVal.getValueType();
12411 unsigned ConvertedOp = 0;
12412 // Use a target machine opcode to prevent further DAGCombine
12413 // optimizations that may separate the arithmetic operations
12414 // from the setcc node.
12415 switch (WideVal.getOpcode()) {
12417 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
12418 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
12419 case ISD::AND: ConvertedOp = X86ISD::AND; break;
12420 case ISD::OR: ConvertedOp = X86ISD::OR; break;
12421 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
12425 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12426 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
12427 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
12428 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
12429 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
12435 // Emit a CMP with 0, which is the TEST pattern.
12436 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
12437 DAG.getConstant(0, Op.getValueType()));
12439 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
12440 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
12442 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
12443 DAG.ReplaceAllUsesWith(Op, New);
12444 return SDValue(New.getNode(), 1);
12447 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
12448 /// equivalent.
12449 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
12450 SDLoc dl, SelectionDAG &DAG) const {
12451 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
12452 if (C->getAPIntValue() == 0)
12453 return EmitTest(Op0, X86CC, dl, DAG);
12455 if (Op0.getValueType() == MVT::i1)
12456 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
12459 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
12460 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
12461 // Do the comparison at i32 if it's smaller, except in the Atom case.
12462 // This avoids subregister aliasing issues. Keep the smaller reference
12463 // if we're optimizing for size, however, as that'll allow better folding
12464 // of memory operations.
12465 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
12466 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
12467 Attribute::MinSize) &&
12468 !Subtarget->isAtom()) {
12469 unsigned ExtendOp =
12470 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
12471 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
12472 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
12474 // Use SUB instead of CMP to enable CSE between SUB and CMP.
12475 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
12476 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
12478 return SDValue(Sub.getNode(), 1);
12480 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
12483 /// Convert a comparison if required by the subtarget.
12484 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
12485 SelectionDAG &DAG) const {
12486 // If the subtarget does not support the FUCOMI instruction, floating-point
12487 // comparisons have to be converted.
12488 if (Subtarget->hasCMov() ||
12489 Cmp.getOpcode() != X86ISD::CMP ||
12490 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
12491 !Cmp.getOperand(1).getValueType().isFloatingPoint())
12494 // The instruction selector will select an FUCOM instruction instead of
12495 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
12496 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
12497 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
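// FNSTSW places the x87 condition codes C0-C3 in bits 8-14 of the status word;
// shifting right by 8 moves them into the low byte, and SAHF then copies that
// byte into EFLAGS (C0 -> CF, C2 -> PF, C3 -> ZF), where the integer condition
// codes expect them.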
12499 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
12500 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
12501 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
12502 DAG.getConstant(8, MVT::i8));
12503 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
12504 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
12507 /// The minimum architected relative accuracy is 2^-12. We need one
12508 /// Newton-Raphson step to have a good float result (24 bits of precision).
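/// One Newton-Raphson step refines an estimate E of 1/sqrt(x) as
/// E' = E * (1.5 - 0.5 * x * E * E), roughly doubling the number of correct bits.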
12509 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
12510 DAGCombinerInfo &DCI,
12511 unsigned &RefinementSteps,
12512 bool &UseOneConstNR) const {
12513 // FIXME: We should use instruction latency models to calculate the cost of
12514 // each potential sequence, but this is very hard to do reliably because
12515 // at least Intel's Core* chips have variable timing based on the number of
12516 // significant digits in the divisor and/or sqrt operand.
12517 if (!Subtarget->useSqrtEst())
12520 EVT VT = Op.getValueType();
12522 // SSE1 has rsqrtss and rsqrtps.
12523 // TODO: Add support for AVX512 (v16f32).
12524 // It is likely not profitable to do this for f64 because a double-precision
12525 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
12526 // instructions: convert to single, rsqrtss, convert back to double, refine
12527 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
12528 // along with FMA, this could be a throughput win.
12529 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
12530 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
12531 RefinementSteps = 1;
12532 UseOneConstNR = false;
12533 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
12538 /// The minimum architected relative accuracy is 2^-12. We need one
12539 /// Newton-Raphson step to have a good float result (24 bits of precision).
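/// One Newton-Raphson step refines an estimate E of 1/x as E' = E * (2 - x * E),
/// roughly doubling the number of correct bits.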
12540 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
12541 DAGCombinerInfo &DCI,
12542 unsigned &RefinementSteps) const {
12543 // FIXME: We should use instruction latency models to calculate the cost of
12544 // each potential sequence, but this is very hard to do reliably because
12545 // at least Intel's Core* chips have variable timing based on the number of
12546 // significant digits in the divisor.
12547 if (!Subtarget->useReciprocalEst())
12550 EVT VT = Op.getValueType();
12552 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
12553 // TODO: Add support for AVX512 (v16f32).
12554 // It is likely not profitable to do this for f64 because a double-precision
12555 // reciprocal estimate with refinement on x86 prior to FMA requires
12556 // 15 instructions: convert to single, rcpss, convert back to double, refine
12557 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
12558 // along with FMA, this could be a throughput win.
12559 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
12560 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
12561 RefinementSteps = ReciprocalEstimateRefinementSteps;
12562 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
12567 static bool isAllOnes(SDValue V) {
12568 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
12569 return C && C->isAllOnesValue();
12572 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
12573 /// if it's possible.
12574 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
12575 SDLoc dl, SelectionDAG &DAG) const {
12576 SDValue Op0 = And.getOperand(0);
12577 SDValue Op1 = And.getOperand(1);
12578 if (Op0.getOpcode() == ISD::TRUNCATE)
12579 Op0 = Op0.getOperand(0);
12580 if (Op1.getOpcode() == ISD::TRUNCATE)
12581 Op1 = Op1.getOperand(0);
12584 if (Op1.getOpcode() == ISD::SHL)
12585 std::swap(Op0, Op1);
12586 if (Op0.getOpcode() == ISD::SHL) {
12587 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
12588 if (And00C->getZExtValue() == 1) {
12589 // If we looked past a truncate, check that it's only truncating away
12590 // known zeros.
12591 unsigned BitWidth = Op0.getValueSizeInBits();
12592 unsigned AndBitWidth = And.getValueSizeInBits();
12593 if (BitWidth > AndBitWidth) {
12595 DAG.computeKnownBits(Op0, Zeros, Ones);
12596 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
12600 RHS = Op0.getOperand(1);
12602 } else if (Op1.getOpcode() == ISD::Constant) {
12603 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
12604 uint64_t AndRHSVal = AndRHS->getZExtValue();
12605 SDValue AndLHS = Op0;
12607 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
12608 LHS = AndLHS.getOperand(0);
12609 RHS = AndLHS.getOperand(1);
12612 // Use BT if the immediate can't be encoded in a TEST instruction.
12613 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
12615 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
12619 if (LHS.getNode()) {
12620 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
12621 // instruction. Since the shift amount is in-range-or-undefined, we know
12622 // that doing a bittest on the i32 value is ok. We extend to i32 because
12623 // the encoding for the i16 version is larger than the i32 version.
12624 // Also promote i16 to i32 for performance / code size reasons.
12625 if (LHS.getValueType() == MVT::i8 ||
12626 LHS.getValueType() == MVT::i16)
12627 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
12629 // If the operand types disagree, extend the shift amount to match. Since
12630 // BT ignores high bits (like shifts) we can use anyextend.
12631 if (LHS.getValueType() != RHS.getValueType())
12632 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
12634 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
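// BT copies the selected bit into CF, so an equal-to-zero compare maps to
// COND_AE (CF == 0) and a not-equal compare maps to COND_B (CF == 1).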
12635 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
12636 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
12637 DAG.getConstant(Cond, MVT::i8), BT);
12643 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating point
12644 /// mask CMPs.
12645 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
12650 // SSE Condition code mapping:
12651 //  0 - EQ
12652 //  1 - LT
12653 //  2 - LE
12654 //  3 - UNORD
12655 //  4 - NEQ
12656 //  5 - NLT
12657 //  6 - NLE
12658 //  7 - ORD
12659 switch (SetCCOpcode) {
12660 default: llvm_unreachable("Unexpected SETCC condition");
12662 case ISD::SETEQ: SSECC = 0; break;
12664 case ISD::SETGT: Swap = true; // Fallthrough
12666 case ISD::SETOLT: SSECC = 1; break;
12668 case ISD::SETGE: Swap = true; // Fallthrough
12670 case ISD::SETOLE: SSECC = 2; break;
12671 case ISD::SETUO: SSECC = 3; break;
12673 case ISD::SETNE: SSECC = 4; break;
12674 case ISD::SETULE: Swap = true; // Fallthrough
12675 case ISD::SETUGE: SSECC = 5; break;
12676 case ISD::SETULT: Swap = true; // Fallthrough
12677 case ISD::SETUGT: SSECC = 6; break;
12678 case ISD::SETO: SSECC = 7; break;
12680 case ISD::SETONE: SSECC = 8; break;
12683 std::swap(Op0, Op1);
12688 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
12689 // ones, and then concatenate the result back.
12690 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
12691 MVT VT = Op.getSimpleValueType();
12693 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
12694 "Unsupported value type for operation");
12696 unsigned NumElems = VT.getVectorNumElements();
12698 SDValue CC = Op.getOperand(2);
12700 // Extract the LHS vectors
12701 SDValue LHS = Op.getOperand(0);
12702 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
12703 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
12705 // Extract the RHS vectors
12706 SDValue RHS = Op.getOperand(1);
12707 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
12708 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
12710 // Issue the operation on the smaller types and concatenate the result back
12711 MVT EltVT = VT.getVectorElementType();
12712 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
12713 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
12714 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
12715 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
12718 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
12719 const X86Subtarget *Subtarget) {
12720 SDValue Op0 = Op.getOperand(0);
12721 SDValue Op1 = Op.getOperand(1);
12722 SDValue CC = Op.getOperand(2);
12723 MVT VT = Op.getSimpleValueType();
12726 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
12727 Op.getValueType().getScalarType() == MVT::i1 &&
12728 "Cannot set masked compare for this operation");
12730 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
12732 bool Unsigned = false;
12735 switch (SetCCOpcode) {
12736 default: llvm_unreachable("Unexpected SETCC condition");
12737 case ISD::SETNE: SSECC = 4; break;
12738 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
12739 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
12740 case ISD::SETLT: Swap = true; //fall-through
12741 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
12742 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
12743 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
12744 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
12745 case ISD::SETULE: Unsigned = true; //fall-through
12746 case ISD::SETLE: SSECC = 2; break;
12750 std::swap(Op0, Op1);
12752 return DAG.getNode(Opc, dl, VT, Op0, Op1);
12753 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
12754 return DAG.getNode(Opc, dl, VT, Op0, Op1,
12755 DAG.getConstant(SSECC, MVT::i8));
12758 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
12759 /// operand \p Op1. If non-trivial (for example because it's not constant)
12760 /// return an empty value.
12761 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
12763 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
12767 MVT VT = Op1.getSimpleValueType();
12768 MVT EVT = VT.getVectorElementType();
12769 unsigned n = VT.getVectorNumElements();
12770 SmallVector<SDValue, 8> ULTOp1;
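// x <u C is equivalent to x <=u (C - 1) as long as no lane constant is zero;
// the zero case is rejected below to avoid underflow.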
12772 for (unsigned i = 0; i < n; ++i) {
12773 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
12774 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
12777 // Avoid underflow.
12778 APInt Val = Elt->getAPIntValue();
12782 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
12785 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
12788 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
12789 SelectionDAG &DAG) {
12790 SDValue Op0 = Op.getOperand(0);
12791 SDValue Op1 = Op.getOperand(1);
12792 SDValue CC = Op.getOperand(2);
12793 MVT VT = Op.getSimpleValueType();
12794 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
12795 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
12800 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
12801 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
12804 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
12805 unsigned Opc = X86ISD::CMPP;
12806 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
12807 assert(VT.getVectorNumElements() <= 16);
12808 Opc = X86ISD::CMPM;
12810 // In the two special cases we can't handle, emit two comparisons.
12813 unsigned CombineOpc;
12814 if (SetCCOpcode == ISD::SETUEQ) {
12815 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
12817 assert(SetCCOpcode == ISD::SETONE);
12818 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
12821 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
12822 DAG.getConstant(CC0, MVT::i8));
12823 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
12824 DAG.getConstant(CC1, MVT::i8));
12825 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
12827 // Handle all other FP comparisons here.
12828 return DAG.getNode(Opc, dl, VT, Op0, Op1,
12829 DAG.getConstant(SSECC, MVT::i8));
12832 // Break 256-bit integer vector compare into smaller ones.
12833 if (VT.is256BitVector() && !Subtarget->hasInt256())
12834 return Lower256IntVSETCC(Op, DAG);
12836 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
12837 EVT OpVT = Op1.getValueType();
12838 if (Subtarget->hasAVX512()) {
12839 if (Op1.getValueType().is512BitVector() ||
12840 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
12841 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
12842 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
12844 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
12845 // but there is no compare instruction for i8 and i16 elements in KNL.
12846 // We are not dealing with 512-bit operands in this case; those
12847 // types are illegal.
12849 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
12850 OpVT.getVectorElementType().getSizeInBits() >= 8))
12851 return DAG.getNode(ISD::TRUNCATE, dl, VT,
12852 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
12855 // We are handling one of the integer comparisons here. Since SSE only has
12856 // GT and EQ comparisons for integer, swapping operands and multiple
12857 // operations may be required for some comparisons.
12859 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
12860 bool Subus = false;
12862 switch (SetCCOpcode) {
12863 default: llvm_unreachable("Unexpected SETCC condition");
12864 case ISD::SETNE: Invert = true;
12865 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
12866 case ISD::SETLT: Swap = true;
12867 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
12868 case ISD::SETGE: Swap = true;
12869 case ISD::SETLE: Opc = X86ISD::PCMPGT;
12870 Invert = true; break;
12871 case ISD::SETULT: Swap = true;
12872 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
12873 FlipSigns = true; break;
12874 case ISD::SETUGE: Swap = true;
12875 case ISD::SETULE: Opc = X86ISD::PCMPGT;
12876 FlipSigns = true; Invert = true; break;
12879 // Special case: Use min/max operations for SETULE/SETUGE
12880 MVT VET = VT.getVectorElementType();
12882 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
12883 || (Subtarget->hasSSE2() && (VET == MVT::i8));
12886 switch (SetCCOpcode) {
12888 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
12889 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
12892 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
12895 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
12896 if (!MinMax && hasSubus) {
12897 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
12898 // Op0 u<= Op1:
12899 // t = psubus Op0, Op1
12900 // pcmpeq t, <0..0>
12901 switch (SetCCOpcode) {
12903 case ISD::SETULT: {
12904 // If the comparison is against a constant we can turn this into a
12905 // setule. With psubus, setule does not require a swap. This is
12906 // beneficial because the constant in the register is no longer
12907 // clobbered as the destination, so it can be hoisted out of a loop.
12908 // Only do this pre-AVX since vpcmp* is no longer destructive.
12909 if (Subtarget->hasAVX())
12911 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
12912 if (ULEOp1.getNode()) {
12914 Subus = true; Invert = false; Swap = false;
12918 // Psubus is better than flip-sign because it requires no inversion.
12919 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
12920 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
12924 Opc = X86ISD::SUBUS;
12930 std::swap(Op0, Op1);
12932 // Check that the operation in question is available (most are plain SSE2,
12933 // but PCMPGTQ and PCMPEQQ have different requirements).
12934 if (VT == MVT::v2i64) {
12935 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
12936 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
12938 // First cast everything to the right type.
12939 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
12940 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
12942 // Since SSE has no unsigned integer comparisons, we need to flip the sign
12943 // bits of the inputs before performing those operations. The lower
12944 // compare is always unsigned.
12947 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
12949 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
12950 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
12951 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
12952 Sign, Zero, Sign, Zero);
12954 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
12955 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
12957 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
12958 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
12959 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
12961 // Create masks for only the low parts/high parts of the 64 bit integers.
12962 static const int MaskHi[] = { 1, 1, 3, 3 };
12963 static const int MaskLo[] = { 0, 0, 2, 2 };
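// After these shuffles each 64-bit lane holds two copies of either its high or
// its low 32-bit half, so the v4i32 compare results line up per 64-bit element.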
12964 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
12965 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
12966 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
12968 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
12969 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
12972 Result = DAG.getNOT(dl, Result, MVT::v4i32);
12974 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
12977 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
12978 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
12979 // pcmpeqd + pshufd + pand.
12980 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
12982 // First cast everything to the right type.
12983 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
12984 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
12987 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
12989 // Make sure the lower and upper halves are both all-ones.
12990 static const int Mask[] = { 1, 0, 3, 2 };
12991 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
12992 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
12995 Result = DAG.getNOT(dl, Result, MVT::v4i32);
12997 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
13001 // Since SSE has no unsigned integer comparisons, we need to flip the sign
13002 // bits of the inputs before performing those operations.
13004 EVT EltVT = VT.getVectorElementType();
13005 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
13006 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
13007 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
13010 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
13012 // If the logical-not of the result is required, perform that now.
13014 Result = DAG.getNOT(dl, Result, VT);
13017 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
13020 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
13021 getZeroVector(VT, Subtarget, DAG, dl));
13026 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
13028 MVT VT = Op.getSimpleValueType();
13030 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
13032 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
13033 && "SetCC type must be 8-bit or 1-bit integer");
13034 SDValue Op0 = Op.getOperand(0);
13035 SDValue Op1 = Op.getOperand(1);
13037 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
13039 // Optimize to BT if possible.
13040 // Lower (X & (1 << N)) == 0 to BT(X, N).
13041 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
13042 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
13043 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
13044 Op1.getOpcode() == ISD::Constant &&
13045 cast<ConstantSDNode>(Op1)->isNullValue() &&
13046 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
13047 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
13048 if (NewSetCC.getNode()) {
13050 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
13055 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
13056 // these.
13057 if (Op1.getOpcode() == ISD::Constant &&
13058 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
13059 cast<ConstantSDNode>(Op1)->isNullValue()) &&
13060 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
13062 // If the input is a setcc, then reuse the input setcc or use a new one with
13063 // the inverted condition.
13064 if (Op0.getOpcode() == X86ISD::SETCC) {
13065 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
13066 bool Invert = (CC == ISD::SETNE) ^
13067 cast<ConstantSDNode>(Op1)->isNullValue();
13071 CCode = X86::GetOppositeBranchCondition(CCode);
13072 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
13073 DAG.getConstant(CCode, MVT::i8),
13074 Op0.getOperand(1));
13076 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
13080 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
13081 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
13082 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
13084 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
13085 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
13088 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
13089 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
13090 if (X86CC == X86::COND_INVALID)
13093 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
13094 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
13095 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
13096 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
13098 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
13102 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
13103 static bool isX86LogicalCmp(SDValue Op) {
13104 unsigned Opc = Op.getNode()->getOpcode();
13105 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
13106 Opc == X86ISD::SAHF)
13108 if (Op.getResNo() == 1 &&
13109 (Opc == X86ISD::ADD ||
13110 Opc == X86ISD::SUB ||
13111 Opc == X86ISD::ADC ||
13112 Opc == X86ISD::SBB ||
13113 Opc == X86ISD::SMUL ||
13114 Opc == X86ISD::UMUL ||
13115 Opc == X86ISD::INC ||
13116 Opc == X86ISD::DEC ||
13117 Opc == X86ISD::OR ||
13118 Opc == X86ISD::XOR ||
13119 Opc == X86ISD::AND))
13122 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
13128 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
13129 if (V.getOpcode() != ISD::TRUNCATE)
13132 SDValue VOp0 = V.getOperand(0);
13133 unsigned InBits = VOp0.getValueSizeInBits();
13134 unsigned Bits = V.getValueSizeInBits();
13135 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
13138 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
13139 bool addTest = true;
13140 SDValue Cond = Op.getOperand(0);
13141 SDValue Op1 = Op.getOperand(1);
13142 SDValue Op2 = Op.getOperand(2);
13144 EVT VT = Op1.getValueType();
13147 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
13148 // are available. Otherwise fp cmovs get lowered into a less efficient branch
13149 // sequence later on.
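// The idea: FSETCC produces an all-ones or all-zeros mask, and
// (mask & Op1) | (~mask & Op2) then selects between the operands without a branch.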
13150 if (Cond.getOpcode() == ISD::SETCC &&
13151 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
13152 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
13153 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
13154 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
13155 int SSECC = translateX86FSETCC(
13156 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
13159 if (Subtarget->hasAVX512()) {
13160 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
13161 DAG.getConstant(SSECC, MVT::i8));
13162 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
13164 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
13165 DAG.getConstant(SSECC, MVT::i8));
13166 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
13167 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
13168 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
13172 if (Cond.getOpcode() == ISD::SETCC) {
13173 SDValue NewCond = LowerSETCC(Cond, DAG);
13174 if (NewCond.getNode())
13178 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
13179 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
13180 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
13181 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
13182 if (Cond.getOpcode() == X86ISD::SETCC &&
13183 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
13184 isZero(Cond.getOperand(1).getOperand(1))) {
13185 SDValue Cmp = Cond.getOperand(1);
13187 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
13189 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
13190 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
13191 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
13193 SDValue CmpOp0 = Cmp.getOperand(0);
13194 // Apply further optimizations for special cases
13195 // (select (x != 0), -1, 0) -> neg & sbb
13196 // (select (x == 0), 0, -1) -> neg & sbb
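// NEG sets CF exactly when x is nonzero, and SETCC_CARRY (an SBB of a register
// with itself) then materializes 0 or all-ones from CF, so the -1/0 result is
// produced without a branch.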
13197 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
13198 if (YC->isNullValue() &&
13199 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
13200 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
13201 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
13202 DAG.getConstant(0, CmpOp0.getValueType()),
13204 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
13205 DAG.getConstant(X86::COND_B, MVT::i8),
13206 SDValue(Neg.getNode(), 1));
13210 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
13211 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
13212 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
13214 SDValue Res = // Res = 0 or -1.
13215 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
13216 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
13218 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
13219 Res = DAG.getNOT(DL, Res, Res.getValueType());
13221 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
13222 if (!N2C || !N2C->isNullValue())
13223 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
13228 // Look past (and (setcc_carry (cmp ...)), 1).
13229 if (Cond.getOpcode() == ISD::AND &&
13230 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
13231 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
13232 if (C && C->getAPIntValue() == 1)
13233 Cond = Cond.getOperand(0);
13236 // If condition flag is set by a X86ISD::CMP, then use it as the condition
13237 // setting operand in place of the X86ISD::SETCC.
13238 unsigned CondOpcode = Cond.getOpcode();
13239 if (CondOpcode == X86ISD::SETCC ||
13240 CondOpcode == X86ISD::SETCC_CARRY) {
13241 CC = Cond.getOperand(0);
13243 SDValue Cmp = Cond.getOperand(1);
13244 unsigned Opc = Cmp.getOpcode();
13245 MVT VT = Op.getSimpleValueType();
13247 bool IllegalFPCMov = false;
13248 if (VT.isFloatingPoint() && !VT.isVector() &&
13249 !isScalarFPTypeInSSEReg(VT)) // FPStack?
13250 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
13252 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
13253 Opc == X86ISD::BT) { // FIXME
13257 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
13258 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
13259 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
13260 Cond.getOperand(0).getValueType() != MVT::i8)) {
13261 SDValue LHS = Cond.getOperand(0);
13262 SDValue RHS = Cond.getOperand(1);
13263 unsigned X86Opcode;
13266 switch (CondOpcode) {
13267 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
13268 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
13269 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
13270 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
13271 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
13272 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
13273 default: llvm_unreachable("unexpected overflowing operator");
13275 if (CondOpcode == ISD::UMULO)
13276 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
13279 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
13281 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
13283 if (CondOpcode == ISD::UMULO)
13284 Cond = X86Op.getValue(2);
13286 Cond = X86Op.getValue(1);
13288 CC = DAG.getConstant(X86Cond, MVT::i8);
13293 // Look past the truncate if the high bits are known zero.
13294 if (isTruncWithZeroHighBitsInput(Cond, DAG))
13295 Cond = Cond.getOperand(0);
13297 // We know the result of AND is compared against zero. Try to match
13298 // it to BT.
13299 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
13300 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
13301 if (NewSetCC.getNode()) {
13302 CC = NewSetCC.getOperand(0);
13303 Cond = NewSetCC.getOperand(1);
13310 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13311 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
13314 // a < b ? -1 : 0 -> RES = ~setcc_carry
13315 // a < b ? 0 : -1 -> RES = setcc_carry
13316 // a >= b ? -1 : 0 -> RES = setcc_carry
13317 // a >= b ? 0 : -1 -> RES = ~setcc_carry
13318 if (Cond.getOpcode() == X86ISD::SUB) {
13319 Cond = ConvertCmpIfNecessary(Cond, DAG);
13320 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
13322 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
13323 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
13324 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
13325 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
13326 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
13327 return DAG.getNOT(DL, Res, Res.getValueType());
13332 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
13333 // widen the cmov and push the truncate through. This avoids introducing a new
13334 // branch during isel and doesn't add any extensions.
13335 if (Op.getValueType() == MVT::i8 &&
13336 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
13337 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
13338 if (T1.getValueType() == T2.getValueType() &&
13339 // Blacklist CopyFromReg to avoid partial register stalls.
13340 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
13341 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
13342 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
13343 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
13347 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if the
13348 // condition is true.
13349 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
13350 SDValue Ops[] = { Op2, Op1, CC, Cond };
13351 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
13354 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
13355 SelectionDAG &DAG) {
13356 MVT VT = Op->getSimpleValueType(0);
13357 SDValue In = Op->getOperand(0);
13358 MVT InVT = In.getSimpleValueType();
13359 MVT VTElt = VT.getVectorElementType();
13360 MVT InVTElt = InVT.getVectorElementType();
13364 if ((InVTElt == MVT::i1) &&
13365 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
13366 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
13368 ((Subtarget->hasBWI() && VT.is512BitVector() &&
13369 VTElt.getSizeInBits() <= 16)) ||
13371 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
13372 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
13374 ((Subtarget->hasDQI() && VT.is512BitVector() &&
13375 VTElt.getSizeInBits() >= 32))))
13376 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
13378 unsigned int NumElts = VT.getVectorNumElements();
13380 if (NumElts != 8 && NumElts != 16)
13383 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
13384 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
13385 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
13386 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
13389 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13390 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
13392 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
13393 Constant *C = ConstantInt::get(*DAG.getContext(),
13394 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
13396 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
13397 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
13398 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
13399 MachinePointerInfo::getConstantPool(),
13400 false, false, false, Alignment);
13401 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
13402 if (VT.is512BitVector())
13404 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
13407 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
13408 SelectionDAG &DAG) {
13409 MVT VT = Op->getSimpleValueType(0);
13410 SDValue In = Op->getOperand(0);
13411 MVT InVT = In.getSimpleValueType();
13414 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
13415 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
13417 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
13418 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
13419 (VT != MVT::v16i16 || InVT != MVT::v16i8))
13422 if (Subtarget->hasInt256())
13423 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
13425 // Optimize vectors in AVX mode:
13426 // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
13429 // Divide the input vector into two parts;
13430 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
13431 // then use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
13432 // and concat the vectors back to the original VT.
13434 unsigned NumElems = InVT.getVectorNumElements();
13435 SDValue Undef = DAG.getUNDEF(InVT);
13437 SmallVector<int,8> ShufMask1(NumElems, -1);
13438 for (unsigned i = 0; i != NumElems/2; ++i)
13441 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
13443 SmallVector<int,8> ShufMask2(NumElems, -1);
13444 for (unsigned i = 0; i != NumElems/2; ++i)
13445 ShufMask2[i] = i + NumElems/2;
13447 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
13449 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
13450 VT.getVectorNumElements()/2);
13452 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
13453 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
13455 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
13458 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
13459 // may emit an illegal shuffle but the expansion is still better than scalar
13460 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
13461 // we'll emit a shuffle and an arithmetic shift.
13462 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
13463 // TODO: It is possible to support ZExt by zeroing the undef values during
13464 // the shuffle phase or after the shuffle.
13465 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
13466 SelectionDAG &DAG) {
13467 MVT RegVT = Op.getSimpleValueType();
13468 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
13469 assert(RegVT.isInteger() &&
13470 "We only custom lower integer vector sext loads.");
13472 // Nothing useful we can do without SSE2 shuffles.
13473 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
13475 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
13477 EVT MemVT = Ld->getMemoryVT();
13478 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13479 unsigned RegSz = RegVT.getSizeInBits();
13481 ISD::LoadExtType Ext = Ld->getExtensionType();
13483 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
13484 && "Only anyext and sext are currently implemented.");
13485 assert(MemVT != RegVT && "Cannot extend to the same type");
13486 assert(MemVT.isVector() && "Must load a vector from memory");
13488 unsigned NumElems = RegVT.getVectorNumElements();
13489 unsigned MemSz = MemVT.getSizeInBits();
13490 assert(RegSz > MemSz && "Register size must be greater than the mem size");
13492 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
13493 // The only way in which we have a legal 256-bit vector result but not the
13494 // integer 256-bit operations needed to directly lower a sextload is if we
13495 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
13496 // a 128-bit vector and a normal sign_extend to 256-bits that should get
13497 // correctly legalized. We do this late to allow the canonical form of
13498 // sextload to persist throughout the rest of the DAG combiner -- it wants
13499 // to fold together any extensions it can, and so will fuse a sign_extend
13500 // of an sextload into a sextload targeting a wider value.
13502 if (MemSz == 128) {
13503 // Just switch this to a normal load.
13504 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
13505 "it must be a legal 128-bit vector "
13507 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
13508 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
13509 Ld->isInvariant(), Ld->getAlignment());
13511 assert(MemSz < 128 &&
13512 "Can't extend a type wider than 128 bits to a 256 bit vector!");
13513 // Do an sext load to a 128-bit vector type. We want to use the same
13514 // number of elements, but elements half as wide. This will end up being
13515 // recursively lowered by this routine, but will succeed as we definitely
13516 // have all the necessary features if we're using AVX1.
13518 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
13519 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
13521 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
13522 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
13523 Ld->isNonTemporal(), Ld->isInvariant(),
13524 Ld->getAlignment());
13527 // Replace chain users with the new chain.
13528 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
13529 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
13531 // Finally, do a normal sign-extend to the desired register.
13532 return DAG.getSExtOrTrunc(Load, dl, RegVT);
13535 // All sizes must be a power of two.
13536 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
13537 "Non-power-of-two elements are not custom lowered!");
13539 // Attempt to load the original value using scalar loads.
13540 // Find the largest scalar type that divides the total loaded size.
13541 MVT SclrLoadTy = MVT::i8;
13542 for (MVT Tp : MVT::integer_valuetypes()) {
13543 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
13548 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
13549 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
13551 SclrLoadTy = MVT::f64;
13553 // Calculate the number of scalar loads that we need to perform
13554 // in order to load our vector from memory.
13555 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
13557 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
13558 "Can only lower sext loads with a single scalar load!");
13560 unsigned loadRegZize = RegSz;
13561 if (Ext == ISD::SEXTLOAD && RegSz == 256)
13564 // Represent our vector as a sequence of elements which are the
13565 // largest scalar that we can load.
13566 EVT LoadUnitVecVT = EVT::getVectorVT(
13567 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
13569 // Represent the data using the same element type that is stored in
13570 // memory. In practice, we 'widen' MemVT.
13572 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
13573 loadRegZize / MemVT.getScalarType().getSizeInBits());
13575 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
13576 "Invalid vector type");
13578 // We can't shuffle using an illegal type.
13579 assert(TLI.isTypeLegal(WideVecVT) &&
13580 "We only lower types that form legal widened vector types");
13582 SmallVector<SDValue, 8> Chains;
13583 SDValue Ptr = Ld->getBasePtr();
13584 SDValue Increment =
13585 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
13586 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
13588 for (unsigned i = 0; i < NumLoads; ++i) {
13589 // Perform a single load.
13590 SDValue ScalarLoad =
13591 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
13592 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
13593 Ld->getAlignment());
13594 Chains.push_back(ScalarLoad.getValue(1));
13595 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
13596 // another round of DAGCombining.
13598 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
13600 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
13601 ScalarLoad, DAG.getIntPtrConstant(i));
13603 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
13606 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
13608 // Bitcast the loaded value to a vector of the original element type, in
13609 // the size of the target vector type.
13610 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
13611 unsigned SizeRatio = RegSz / MemSz;
13613 if (Ext == ISD::SEXTLOAD) {
13614 // If we have SSE4.1, we can directly emit a VSEXT node.
13615 if (Subtarget->hasSSE41()) {
13616 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
13617 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
13621 // Otherwise we'll shuffle the small elements in the high bits of the
13622 // larger type and perform an arithmetic shift. If the shift is not legal
13623 // it's better to scalarize.
13624 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
13625 "We can't implement a sext load without an arithmetic right shift!");
13627 // Redistribute the loaded elements into the different locations.
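// For the sign-extending case each narrow element is placed in the most
// significant slot of its wide lane (e.g. byte i goes to position 4*i+3 when
// widening i8 to i32), so the arithmetic shift below smears the sign bit down.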
13628 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
13629 for (unsigned i = 0; i != NumElems; ++i)
13630 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
13632 SDValue Shuff = DAG.getVectorShuffle(
13633 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
13635 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
13637 // Build the arithmetic shift.
13638 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
13639 MemVT.getVectorElementType().getSizeInBits();
13641 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
13643 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
13647 // Redistribute the loaded elements into the different locations.
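// For the any-extending case the loaded element goes in the least significant
// slot of each wide lane; the remaining slots stay undef, which is acceptable
// because EXTLOAD makes no guarantee about the high bits.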
13648 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
13649 for (unsigned i = 0; i != NumElems; ++i)
13650 ShuffleVec[i * SizeRatio] = i;
13652 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
13653 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
13655 // Bitcast to the requested type.
13656 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
13657 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
// isAndOrOfSetCCs - Return true if node is an ISD::AND or
// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
// from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}
// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
// 1 and that the SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  SDLoc dl(Op);
  SDValue CC;
  bool Inverted = false;
13696 if (Cond.getOpcode() == ISD::SETCC) {
13697 // Check for setcc([su]{add,sub,mul}o == 0).
13698 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
13699 isa<ConstantSDNode>(Cond.getOperand(1)) &&
13700 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
13701 Cond.getOperand(0).getResNo() == 1 &&
13702 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
13703 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
13704 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
13705 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
13706 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      SDValue NewCond = LowerSETCC(Cond, DAG);
      if (NewCond.getNode())
        Cond = NewCond;
    }
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD  ||
           Cond.getOpcode() == X86ISD::SUB  ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif
  // Look past (and (setcc_carry (cmp ...)), 1).
13726 if (Cond.getOpcode() == ISD::AND &&
13727 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
13728 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
13729 if (C && C->getAPIntValue() == 1)
13730 Cond = Cond.getOperand(0);
13733 // If condition flag is set by a X86ISD::CMP, then use it as the condition
13734 // setting operand in place of the X86ISD::SETCC.
13735 unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  }
13759 CondOpcode = Cond.getOpcode();
13760 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
13761 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
13762 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
13763 Cond.getOperand(0).getValueType() != MVT::i8)) {
13764 SDValue LHS = Cond.getOperand(0);
13765 SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    // Keep this in sync with LowerXALUO, otherwise we might create redundant
    // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
    // X86ISD::INC).
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);

    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);
    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
13813 SDValue Cmp = Cond.getOperand(0).getOperand(1);
13814 if (CondOpc == ISD::OR) {
13815 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
13818 if (Cmp == Cond.getOperand(1).getOperand(1) &&
13819 isX86LogicalCmp(Cmp)) {
13820 CC = Cond.getOperand(0).getOperand(0);
13821 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
13822 Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
13828 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
13829 // two branches instead of an explicit AND instruction with a
13830 // separate test. However, we only do this if this block doesn't
13831 // have a fall-through edge, because this requires an explicit
13832 // jmp when the condition is false.
13833 if (Cmp == Cond.getOperand(1).getOperand(1) &&
13834 isX86LogicalCmp(Cmp) &&
13835 Op.getNode()->hasOneUse()) {
13836 X86::CondCode CCode =
13837 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
13838 CCode = X86::GetOppositeBranchCondition(CCode);
13839 CC = DAG.getConstant(CCode, MVT::i8);
13840 SDNode *User = *Op.getNode()->use_begin();
13841 // Look for an unconditional branch following this conditional branch.
13842 // We need this because we need to reverse the successors in order
13843 // to implement FCMP_OEQ.
13844 if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User); (void)NewBR;
            Dest = FalseBB;
13852 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
13853 Chain, Dest, CC, Cmp);
13854 X86::CondCode CCode =
13855 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
13856 CCode = X86::GetOppositeBranchCondition(CCode);
13857 CC = DAG.getConstant(CCode, MVT::i8);
13863 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
    // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
    // It should be transformed during dag combining except when the condition
    // is set by an arithmetic-with-overflow node.
13867 X86::CondCode CCode =
13868 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
13869 CCode = X86::GetOppositeBranchCondition(CCode);
13870 CC = DAG.getConstant(CCode, MVT::i8);
    Cond = Cond.getOperand(0).getOperand(1);
    addTest = false;
  } else if (Cond.getOpcode() == ISD::SETCC &&
13874 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
13875 // For FCMP_OEQ, we can emit
13876 // two branches instead of an explicit AND instruction with a
13877 // separate test. However, we only do this if this block doesn't
13878 // have a fall-through edge, because this requires an explicit
13879 // jmp when the condition is false.
13880 if (Op.getNode()->hasOneUse()) {
13881 SDNode *User = *Op.getNode()->use_begin();
13882 // Look for an unconditional branch following this conditional branch.
13883 // We need this because we need to reverse the successors in order
13884 // to implement FCMP_OEQ.
13885 if (User->getOpcode() == ISD::BR) {
        SDValue FalseBB = User->getOperand(1);
        SDNode *NewBR =
          DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
        assert(NewBR == User); (void)NewBR;
        Dest = FalseBB;
13893 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13894 Cond.getOperand(0), Cond.getOperand(1));
13895 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
13896 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13897 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
13898 Chain, Dest, CC, Cmp);
13899 CC = DAG.getConstant(X86::COND_P, MVT::i8);
13904 } else if (Cond.getOpcode() == ISD::SETCC &&
13905 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
13906 // For FCMP_UNE, we can emit
13907 // two branches instead of an explicit AND instruction with a
13908 // separate test. However, we only do this if this block doesn't
13909 // have a fall-through edge, because this requires an explicit
13910 // jmp when the condition is false.
13911 if (Op.getNode()->hasOneUse()) {
13912 SDNode *User = *Op.getNode()->use_begin();
13913 // Look for an unconditional branch following this conditional branch.
13914 // We need this because we need to reverse the successors in order
13915 // to implement FCMP_UNE.
13916 if (User->getOpcode() == ISD::BR) {
        SDValue FalseBB = User->getOperand(1);
        SDNode *NewBR =
          DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
        assert(NewBR == User); (void)NewBR;
        Dest = FalseBB;
13923 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13924 Cond.getOperand(0), Cond.getOperand(1));
13925 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
13926 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13927 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
13928 Chain, Dest, CC, Cmp);
13929 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
  if (addTest) {
    // Look past the truncate if the high bits are known zero.
13940 if (isTruncWithZeroHighBitsInput(Cond, DAG))
13941 Cond = Cond.getOperand(0);
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
    CC = DAG.getConstant(X86Cond, MVT::i8);
    Cond = EmitTest(Cond, X86Cond, dl, DAG);
  }
13960 Cond = ConvertCmpIfNecessary(Cond, DAG);
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}
13965 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
13966 // Calls to _alloca are needed to probe the stack when allocating more than 4k
13967 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
13968 // that the guard pages used by the OS virtual memory manager are allocated in
13969 // correct sequence.
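// For example, a single dynamic allocation of 12K bytes is expected to touch
// the stack at three 4K-spaced points so that each guard page is faulted in
// the order the OS expects.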
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
13972 SelectionDAG &DAG) const {
13973 MachineFunction &MF = DAG.getMachineFunction();
13974 bool SplitStack = MF.shouldSplitStack();
  bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
               SplitStack;
  SDLoc dl(Op);

  if (!Lower) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13981 SDNode* Node = Op.getNode();
13983 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
13984 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
13985 " not tell us which reg is the stack pointer!");
13986 EVT VT = Node->getValueType(0);
13987 SDValue Tmp1 = SDValue(Node, 0);
13988 SDValue Tmp2 = SDValue(Node, 1);
13989 SDValue Tmp3 = Node->getOperand(2);
13990 SDValue Chain = Tmp1.getOperand(0);
13992 // Chain the dynamic stack allocation so that it doesn't modify the stack
13993 // pointer when other instructions are using the stack.
13994 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
13997 SDValue Size = Tmp2.getOperand(1);
13998 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
13999 Chain = SP.getValue(1);
14000 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
14001 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
14002 unsigned StackAlign = TFI.getStackAlignment();
14003 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
14004 if (Align > StackAlign)
14005 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
14006 DAG.getConstant(-(uint64_t)Align, VT));
14007 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
14009 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
14010 DAG.getIntPtrConstant(0, true), SDValue(),
14013 SDValue Ops[2] = { Tmp1, Tmp2 };
14014 return DAG.getMergeValues(Ops, dl);
14018 SDValue Chain = Op.getOperand(0);
14019 SDValue Size = Op.getOperand(1);
14020 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
14021 EVT VT = Op.getNode()->getValueType(0);
14023 bool Is64Bit = Subtarget->is64Bit();
14024 EVT SPTy = getPointerTy();
14027 MachineRegisterInfo &MRI = MF.getRegInfo();
    // The 64 bit implementation of segmented stacks needs to clobber both r10
    // and r11. This makes it impossible to use it along with nested parameters.
14032 const Function *F = MF.getFunction();
14034 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
14036 if (I->hasNestAttr())
14037 report_fatal_error("Cannot use segmented stacks with functions that "
14038 "have nested arguments.");
14041 const TargetRegisterClass *AddrRegClass =
14042 getRegClassFor(getPointerTy());
14043 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
14044 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
14045 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
14046 DAG.getRegister(Vreg, SPTy));
14047 SDValue Ops1[2] = { Value, Chain };
14048 return DAG.getMergeValues(Ops1, dl);
14051 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
14053 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
14054 Flag = Chain.getValue(1);
14055 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14057 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
14059 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
14060 unsigned SPReg = RegInfo->getStackRegister();
14061 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
14062 Chain = SP.getValue(1);
14065 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
14066 DAG.getConstant(-(uint64_t)Align, VT));
14067 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
14070 SDValue Ops1[2] = { SP, Chain };
14071 return DAG.getMergeValues(Ops1, dl);
14075 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
14076 MachineFunction &MF = DAG.getMachineFunction();
14077 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
14079 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
14082 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
14083 // vastart just stores the address of the VarArgsFrameIndex slot into the
14084 // memory location argument.
14085 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
14087 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
14088 MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
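  // For reference, the System V AMD64 va_list is laid out roughly as:
  //   typedef struct {
  //     unsigned int gp_offset;
  //     unsigned int fp_offset;
  //     void *overflow_arg_area;
  //     void *reg_save_area;
  //   } va_list[1];
  // The four stores below fill these fields at offsets 0, 4, 8 and 16.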
14096 SmallVector<SDValue, 8> MemOps;
14097 SDValue FIN = Op.getOperand(1);
14099 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
14100 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
14102 FIN, MachinePointerInfo(SV), false, false, 0);
14103 MemOps.push_back(Store);
14106 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14107 FIN, DAG.getIntPtrConstant(4));
14108 Store = DAG.getStore(Op.getOperand(0), DL,
14109 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
14111 FIN, MachinePointerInfo(SV, 4), false, false, 0);
14112 MemOps.push_back(Store);
14114 // Store ptr to overflow_arg_area
14115 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14116 FIN, DAG.getIntPtrConstant(4));
14117 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
14119 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
14120 MachinePointerInfo(SV, 8),
14122 MemOps.push_back(Store);
14124 // Store ptr to reg_save_area.
14125 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14126 FIN, DAG.getIntPtrConstant(8));
14127 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
14129 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
14130 MachinePointerInfo(SV, 16), false, false, 0);
14131 MemOps.push_back(Store);
14132 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
14135 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
14136 assert(Subtarget->is64Bit() &&
14137 "LowerVAARG only handles 64-bit va_arg!");
14138 assert((Subtarget->isTargetLinux() ||
14139 Subtarget->isTargetDarwin()) &&
14140 "Unhandled target in LowerVAARG");
14141 assert(Op.getNode()->getNumOperands() == 4);
14142 SDValue Chain = Op.getOperand(0);
14143 SDValue SrcPtr = Op.getOperand(1);
14144 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
14145 unsigned Align = Op.getConstantOperandVal(3);
14148 EVT ArgVT = Op.getNode()->getValueType(0);
14149 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
14150 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
14153 // Decide which area this value should be read from.
14154 // TODO: Implement the AMD64 ABI in its entirety. This simple
14155 // selection mechanism works only for the basic types.
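  // For example, an i32 or i64 argument is read through gp_offset (ArgMode 1)
  // below, while a float or double argument goes through fp_offset (ArgMode 2).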
14156 if (ArgVT == MVT::f80) {
14157 llvm_unreachable("va_arg for f80 not yet implemented");
14158 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
14159 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
14160 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
14161 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
14163 llvm_unreachable("Unhandled argument type in LowerVAARG");
14166 if (ArgMode == 2) {
14167 // Sanity Check: Make sure using fp_offset makes sense.
14168 assert(!DAG.getTarget().Options.UseSoftFloat &&
14169 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
14170 Attribute::NoImplicitFloat)) &&
14171 Subtarget->hasSSE1());
14174 // Insert VAARG_64 node into the DAG
14175 // VAARG_64 returns two values: Variable Argument Address, Chain
14176 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32),
14177 DAG.getConstant(ArgMode, MVT::i8),
14178 DAG.getConstant(Align, MVT::i32)};
14179 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
14180 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
14181 VTs, InstOps, MVT::i64,
14182 MachinePointerInfo(SV),
14184 /*Volatile=*/false,
14186 /*WriteMem=*/true);
14187 Chain = VAARG.getValue(1);
14189 // Load the next argument and return it
14190 return DAG.getLoad(ArgVT, dl,
14193 MachinePointerInfo(),
14194 false, false, false, 0);
14197 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
14198 SelectionDAG &DAG) {
14199 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
14200 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
14201 SDValue Chain = Op.getOperand(0);
14202 SDValue DstPtr = Op.getOperand(1);
14203 SDValue SrcPtr = Op.getOperand(2);
14204 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
14205 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
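  // The va_list above is two i32 offsets followed by two 8-byte pointers,
  // 24 bytes in total, so copying it is a fixed-size 24-byte memcpy.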
14208 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
14209 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
14211 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
14214 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
14215 // amount is a constant. Takes immediate version of shift as input.
14216 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
14217 SDValue SrcOp, uint64_t ShiftAmt,
14218 SelectionDAG &DAG) {
14219 MVT ElementType = VT.getVectorElementType();
14221 // Fold this packed shift into its first operand if ShiftAmt is 0.
14225 // Check for ShiftAmt >= element width
14226 if (ShiftAmt >= ElementType.getSizeInBits()) {
14227 if (Opc == X86ISD::VSRAI)
14228 ShiftAmt = ElementType.getSizeInBits() - 1;
14230 return DAG.getConstant(0, VT);
14233 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
14234 && "Unknown target vector shift-by-constant node");
14236 // Fold this packed vector shift into a build vector if SrcOp is a
14237 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
14238 if (VT == SrcOp.getSimpleValueType() &&
14239 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
14240 SmallVector<SDValue, 8> Elts;
14241 unsigned NumElts = SrcOp->getNumOperands();
14242 ConstantSDNode *ND;
14245 default: llvm_unreachable(nullptr);
14246 case X86ISD::VSHLI:
14247 for (unsigned i=0; i!=NumElts; ++i) {
14248 SDValue CurrentOp = SrcOp->getOperand(i);
14249 if (CurrentOp->getOpcode() == ISD::UNDEF) {
14250 Elts.push_back(CurrentOp);
14253 ND = cast<ConstantSDNode>(CurrentOp);
14254 const APInt &C = ND->getAPIntValue();
14255 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
14258 case X86ISD::VSRLI:
14259 for (unsigned i=0; i!=NumElts; ++i) {
14260 SDValue CurrentOp = SrcOp->getOperand(i);
14261 if (CurrentOp->getOpcode() == ISD::UNDEF) {
14262 Elts.push_back(CurrentOp);
14265 ND = cast<ConstantSDNode>(CurrentOp);
14266 const APInt &C = ND->getAPIntValue();
14267 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
14270 case X86ISD::VSRAI:
14271 for (unsigned i=0; i!=NumElts; ++i) {
14272 SDValue CurrentOp = SrcOp->getOperand(i);
14273 if (CurrentOp->getOpcode() == ISD::UNDEF) {
14274 Elts.push_back(CurrentOp);
14277 ND = cast<ConstantSDNode>(CurrentOp);
14278 const APInt &C = ND->getAPIntValue();
14279 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
14284 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
14287 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
14290 // getTargetVShiftNode - Handle vector element shifts where the shift amount
14291 // may or may not be a constant. Takes immediate version of shift as input.
14292 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
14293 SDValue SrcOp, SDValue ShAmt,
14294 SelectionDAG &DAG) {
14295 MVT SVT = ShAmt.getSimpleValueType();
14296 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
14298 // Catch shift-by-constant.
14299 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
14300 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
14301 CShAmt->getZExtValue(), DAG);
14303 // Change opcode to non-immediate version
14305 default: llvm_unreachable("Unknown target vector shift node");
14306 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
14307 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
14308 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
14311 const X86Subtarget &Subtarget =
14312 static_cast<const X86Subtarget &>(DAG.getSubtarget());
14313 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
14314 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
14315 // Let the shuffle legalizer expand this shift amount node.
14316 SDValue Op0 = ShAmt.getOperand(0);
14317 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
14318 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
14320 // Need to build a vector containing shift amount.
14321 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
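    // For an i32 amount A this builds <A, 0, undef, undef> as a v4i32, and
    // for an i64 amount it builds <A, undef> as a v2i64; the instruction only
    // reads the low 64 bits of the shift-count vector.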
14322 SmallVector<SDValue, 4> ShOps;
14323 ShOps.push_back(ShAmt);
14324 if (SVT == MVT::i32) {
14325 ShOps.push_back(DAG.getConstant(0, SVT));
14326 ShOps.push_back(DAG.getUNDEF(SVT));
14328 ShOps.push_back(DAG.getUNDEF(SVT));
14330 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
14331 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
14334 // The return type has to be a 128-bit type with the same element
14335 // type as the input type.
14336 MVT EltVT = VT.getVectorElementType();
14337 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
14339 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
14340 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
14343 /// \brief Return (and \p Op, \p Mask) for compare instructions or
14344 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
14345 /// necessary casting for \p Mask when lowering masking intrinsics.
14346 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
14347 SDValue PreservedSrc,
14348 const X86Subtarget *Subtarget,
14349 SelectionDAG &DAG) {
14350 EVT VT = Op.getValueType();
14351 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
14352 MVT::i1, VT.getVectorNumElements());
14353 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14354 Mask.getValueType().getSizeInBits());
14357 assert(MaskVT.isSimple() && "invalid mask type");
14359 if (isAllOnes(Mask))
14362 // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
14363 // are extracted by EXTRACT_SUBVECTOR.
14364 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
14365 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
14366 DAG.getIntPtrConstant(0));
14368 switch (Op.getOpcode()) {
14370 case X86ISD::PCMPEQM:
14371 case X86ISD::PCMPGTM:
14373 case X86ISD::CMPMU:
14374 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
14376 if (PreservedSrc.getOpcode() == ISD::UNDEF)
14377 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
14378 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
14381 /// \brief Creates an SDNode for a predicated scalar operation.
14382 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming as MVT::i8 and it should be truncated
14384 /// to MVT::i1 while lowering masking intrinsics.
14385 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
14386 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
14387 /// a scalar instruction.
14388 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
14389 SDValue PreservedSrc,
14390 const X86Subtarget *Subtarget,
14391 SelectionDAG &DAG) {
14392 if (isAllOnes(Mask))
14395 EVT VT = Op.getValueType();
14397 // The mask should be of type MVT::i1
14398 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
14400 if (PreservedSrc.getOpcode() == ISD::UNDEF)
14401 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
14402 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
14405 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
14406 SelectionDAG &DAG) {
14408 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
14409 EVT VT = Op.getValueType();
14410 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
14412 switch(IntrData->Type) {
14413 case INTR_TYPE_1OP:
14414 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
14415 case INTR_TYPE_2OP:
14416 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
14418 case INTR_TYPE_3OP:
14419 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
14420 Op.getOperand(2), Op.getOperand(3));
14421 case INTR_TYPE_1OP_MASK_RM: {
14422 SDValue Src = Op.getOperand(1);
14423 SDValue Src0 = Op.getOperand(2);
14424 SDValue Mask = Op.getOperand(3);
14425 SDValue RoundingMode = Op.getOperand(4);
14426 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
14428 Mask, Src0, Subtarget, DAG);
14430 case INTR_TYPE_SCALAR_MASK_RM: {
14431 SDValue Src1 = Op.getOperand(1);
14432 SDValue Src2 = Op.getOperand(2);
14433 SDValue Src0 = Op.getOperand(3);
14434 SDValue Mask = Op.getOperand(4);
14435 SDValue RoundingMode = Op.getOperand(5);
14436 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
14438 Mask, Src0, Subtarget, DAG);
14440 case INTR_TYPE_2OP_MASK: {
14441 SDValue Src1 = Op.getOperand(1);
14442 SDValue Src2 = Op.getOperand(2);
14443 SDValue PassThru = Op.getOperand(3);
14444 SDValue Mask = Op.getOperand(4);
14445 // We specify 2 possible opcodes for intrinsics with rounding modes.
14446 // First, we check if the intrinsic may have non-default rounding mode,
14447 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
14448 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
14449 if (IntrWithRoundingModeOpcode != 0) {
14450 SDValue Rnd = Op.getOperand(5);
14451 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
14452 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
14453 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
14454 dl, Op.getValueType(),
14456 Mask, PassThru, Subtarget, DAG);
14459 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
14461 Mask, PassThru, Subtarget, DAG);
14463 case FMA_OP_MASK: {
14464 SDValue Src1 = Op.getOperand(1);
14465 SDValue Src2 = Op.getOperand(2);
14466 SDValue Src3 = Op.getOperand(3);
14467 SDValue Mask = Op.getOperand(4);
14468 // We specify 2 possible opcodes for intrinsics with rounding modes.
14469 // First, we check if the intrinsic may have non-default rounding mode,
14470 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
14471 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
14472 if (IntrWithRoundingModeOpcode != 0) {
14473 SDValue Rnd = Op.getOperand(5);
14474 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
14475 X86::STATIC_ROUNDING::CUR_DIRECTION)
14476 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
14477 dl, Op.getValueType(),
14478 Src1, Src2, Src3, Rnd),
14479 Mask, Src1, Subtarget, DAG);
14481 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
14482 dl, Op.getValueType(),
14484 Mask, Src1, Subtarget, DAG);
14487 case CMP_MASK_CC: {
14488 // Comparison intrinsics with masks.
14489 // Example of transformation:
14490 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
14491 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
14493 // (v8i1 (insert_subvector undef,
14494 // (v2i1 (and (PCMPEQM %a, %b),
14495 // (extract_subvector
14496 // (v8i1 (bitcast %mask)), 0))), 0))))
14497 EVT VT = Op.getOperand(1).getValueType();
14498 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14499 VT.getVectorNumElements());
14500 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
14501 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14502 Mask.getValueType().getSizeInBits());
14504 if (IntrData->Type == CMP_MASK_CC) {
14505 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
14506 Op.getOperand(2), Op.getOperand(3));
14508 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
14509 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
14512 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
14513 DAG.getTargetConstant(0, MaskVT),
14515 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
14516 DAG.getUNDEF(BitcastVT), CmpMask,
14517 DAG.getIntPtrConstant(0));
14518 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
14520 case COMI: { // Comparison intrinsics
14521 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
14522 SDValue LHS = Op.getOperand(1);
14523 SDValue RHS = Op.getOperand(2);
14524 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
14525 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
14526 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
14527 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
14528 DAG.getConstant(X86CC, MVT::i8), Cond);
14529 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
14532 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
14533 Op.getOperand(1), Op.getOperand(2), DAG);
14535 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
14536 Op.getSimpleValueType(),
14538 Op.getOperand(2), DAG),
14539 Op.getOperand(4), Op.getOperand(3), Subtarget,
14541 case COMPRESS_EXPAND_IN_REG: {
14542 SDValue Mask = Op.getOperand(3);
14543 SDValue DataToCompress = Op.getOperand(1);
14544 SDValue PassThru = Op.getOperand(2);
14545 if (isAllOnes(Mask)) // return data as is
14546 return Op.getOperand(1);
14547 EVT VT = Op.getValueType();
14548 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14549 VT.getVectorNumElements());
14550 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14551 Mask.getValueType().getSizeInBits());
14553 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
14554 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
14555 DAG.getIntPtrConstant(0));
14557 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
14561 SDValue Mask = Op.getOperand(3);
14562 EVT VT = Op.getValueType();
14563 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14564 VT.getVectorNumElements());
14565 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
14566 Mask.getValueType().getSizeInBits());
14568 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
14569 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
14570 DAG.getIntPtrConstant(0));
14571 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
14580 default: return SDValue(); // Don't custom lower most intrinsics.
14582 case Intrinsic::x86_avx512_mask_valign_q_512:
14583 case Intrinsic::x86_avx512_mask_valign_d_512:
14584 // Vector source operands are swapped.
14585 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
14586 Op.getValueType(), Op.getOperand(2),
14589 Op.getOperand(5), Op.getOperand(4),
14592 // ptest and testp intrinsics. The intrinsic these come from are designed to
14593 // return an integer value, not just an instruction so lower it to the ptest
14594 // or testp pattern and a setcc for the result.
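  // For example, the llvm.x86.sse41.ptestz intrinsic (behind _mm_testz_si128)
  // becomes a PTEST node feeding a SETCC with COND_E, zero-extended to i32.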
14595 case Intrinsic::x86_sse41_ptestz:
14596 case Intrinsic::x86_sse41_ptestc:
14597 case Intrinsic::x86_sse41_ptestnzc:
14598 case Intrinsic::x86_avx_ptestz_256:
14599 case Intrinsic::x86_avx_ptestc_256:
14600 case Intrinsic::x86_avx_ptestnzc_256:
14601 case Intrinsic::x86_avx_vtestz_ps:
14602 case Intrinsic::x86_avx_vtestc_ps:
14603 case Intrinsic::x86_avx_vtestnzc_ps:
14604 case Intrinsic::x86_avx_vtestz_pd:
14605 case Intrinsic::x86_avx_vtestc_pd:
14606 case Intrinsic::x86_avx_vtestnzc_pd:
14607 case Intrinsic::x86_avx_vtestz_ps_256:
14608 case Intrinsic::x86_avx_vtestc_ps_256:
14609 case Intrinsic::x86_avx_vtestnzc_ps_256:
14610 case Intrinsic::x86_avx_vtestz_pd_256:
14611 case Intrinsic::x86_avx_vtestc_pd_256:
14612 case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;

    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
14617 case Intrinsic::x86_avx_vtestz_ps:
14618 case Intrinsic::x86_avx_vtestz_pd:
14619 case Intrinsic::x86_avx_vtestz_ps_256:
14620 case Intrinsic::x86_avx_vtestz_pd_256:
14621 IsTestPacked = true; // Fallthrough
14622 case Intrinsic::x86_sse41_ptestz:
14623 case Intrinsic::x86_avx_ptestz_256:
14625 X86CC = X86::COND_E;
14627 case Intrinsic::x86_avx_vtestc_ps:
14628 case Intrinsic::x86_avx_vtestc_pd:
14629 case Intrinsic::x86_avx_vtestc_ps_256:
14630 case Intrinsic::x86_avx_vtestc_pd_256:
14631 IsTestPacked = true; // Fallthrough
14632 case Intrinsic::x86_sse41_ptestc:
14633 case Intrinsic::x86_avx_ptestc_256:
14635 X86CC = X86::COND_B;
14637 case Intrinsic::x86_avx_vtestnzc_ps:
14638 case Intrinsic::x86_avx_vtestnzc_pd:
14639 case Intrinsic::x86_avx_vtestnzc_ps_256:
14640 case Intrinsic::x86_avx_vtestnzc_pd_256:
14641 IsTestPacked = true; // Fallthrough
14642 case Intrinsic::x86_sse41_ptestnzc:
14643 case Intrinsic::x86_avx_ptestnzc_256:
14645 X86CC = X86::COND_A;
14649 SDValue LHS = Op.getOperand(1);
14650 SDValue RHS = Op.getOperand(2);
14651 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
14652 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
14653 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
14654 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
14655 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
14657 case Intrinsic::x86_avx512_kortestz_w:
14658 case Intrinsic::x86_avx512_kortestc_w: {
14659 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
14660 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
14661 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
14662 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
14663 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
14664 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
14665 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
14668 case Intrinsic::x86_sse42_pcmpistria128:
14669 case Intrinsic::x86_sse42_pcmpestria128:
14670 case Intrinsic::x86_sse42_pcmpistric128:
14671 case Intrinsic::x86_sse42_pcmpestric128:
14672 case Intrinsic::x86_sse42_pcmpistrio128:
14673 case Intrinsic::x86_sse42_pcmpestrio128:
14674 case Intrinsic::x86_sse42_pcmpistris128:
14675 case Intrinsic::x86_sse42_pcmpestris128:
14676 case Intrinsic::x86_sse42_pcmpistriz128:
14677 case Intrinsic::x86_sse42_pcmpestriz128: {
14681 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
14682 case Intrinsic::x86_sse42_pcmpistria128:
14683 Opcode = X86ISD::PCMPISTRI;
14684 X86CC = X86::COND_A;
14686 case Intrinsic::x86_sse42_pcmpestria128:
14687 Opcode = X86ISD::PCMPESTRI;
14688 X86CC = X86::COND_A;
14690 case Intrinsic::x86_sse42_pcmpistric128:
14691 Opcode = X86ISD::PCMPISTRI;
14692 X86CC = X86::COND_B;
14694 case Intrinsic::x86_sse42_pcmpestric128:
14695 Opcode = X86ISD::PCMPESTRI;
14696 X86CC = X86::COND_B;
14698 case Intrinsic::x86_sse42_pcmpistrio128:
14699 Opcode = X86ISD::PCMPISTRI;
14700 X86CC = X86::COND_O;
14702 case Intrinsic::x86_sse42_pcmpestrio128:
14703 Opcode = X86ISD::PCMPESTRI;
14704 X86CC = X86::COND_O;
14706 case Intrinsic::x86_sse42_pcmpistris128:
14707 Opcode = X86ISD::PCMPISTRI;
14708 X86CC = X86::COND_S;
14710 case Intrinsic::x86_sse42_pcmpestris128:
14711 Opcode = X86ISD::PCMPESTRI;
14712 X86CC = X86::COND_S;
14714 case Intrinsic::x86_sse42_pcmpistriz128:
14715 Opcode = X86ISD::PCMPISTRI;
14716 X86CC = X86::COND_E;
14718 case Intrinsic::x86_sse42_pcmpestriz128:
14719 Opcode = X86ISD::PCMPESTRI;
14720 X86CC = X86::COND_E;
14723 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
14724 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
14725 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
14726 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
14727 DAG.getConstant(X86CC, MVT::i8),
14728 SDValue(PCMP.getNode(), 1));
14729 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
14732 case Intrinsic::x86_sse42_pcmpistri128:
14733 case Intrinsic::x86_sse42_pcmpestri128: {
14735 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
14736 Opcode = X86ISD::PCMPISTRI;
14738 Opcode = X86ISD::PCMPESTRI;
14740 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
14741 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
14742 return DAG.getNode(Opcode, dl, VTs, NewOps);
14747 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
14748 SDValue Src, SDValue Mask, SDValue Base,
14749 SDValue Index, SDValue ScaleOp, SDValue Chain,
14750 const X86Subtarget * Subtarget) {
14752 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
14753 assert(C && "Invalid scale type");
14754 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
14755 EVT MaskVT = MVT::getVectorVT(MVT::i1,
14756 Index.getSimpleValueType().getVectorNumElements());
14758 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
14760 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
14762 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
14763 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
14764 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
14765 SDValue Segment = DAG.getRegister(0, MVT::i32);
14766 if (Src.getOpcode() == ISD::UNDEF)
14767 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
14768 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
14769 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
14770 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
14771 return DAG.getMergeValues(RetOps, dl);
14774 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
14775 SDValue Src, SDValue Mask, SDValue Base,
14776 SDValue Index, SDValue ScaleOp, SDValue Chain) {
14778 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
14779 assert(C && "Invalid scale type");
14780 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
14781 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
14782 SDValue Segment = DAG.getRegister(0, MVT::i32);
14783 EVT MaskVT = MVT::getVectorVT(MVT::i1,
14784 Index.getSimpleValueType().getVectorNumElements());
14786 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
14788 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
14790 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
14791 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
14792 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
14793 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
14794 return SDValue(Res, 1);
14797 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
14798 SDValue Mask, SDValue Base, SDValue Index,
14799 SDValue ScaleOp, SDValue Chain) {
14801 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
14802 assert(C && "Invalid scale type");
14803 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
14804 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
14805 SDValue Segment = DAG.getRegister(0, MVT::i32);
14807 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
14809 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
14811 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
14813 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
14814 //SDVTList VTs = DAG.getVTList(MVT::Other);
14815 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
14816 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
14817 return SDValue(Res, 0);
14820 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
14821 // read performance monitor counters (x86_rdpmc).
14822 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
14823 SelectionDAG &DAG, const X86Subtarget *Subtarget,
14824 SmallVectorImpl<SDValue> &Results) {
14825 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
14826 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
14829 // The ECX register is used to select the index of the performance counter
14831 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
14833 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
14835 // Reads the content of a 64-bit performance counter and returns it in the
14836 // registers EDX:EAX.
14837 if (Subtarget->is64Bit()) {
14838 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
14839 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
14842 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
14843 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
14846 Chain = HI.getValue(1);
14848 if (Subtarget->is64Bit()) {
14849 // The EAX register is loaded with the low-order 32 bits. The EDX register
14850 // is loaded with the supported high-order bits of the counter.
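    // Combine the two halves as (HI << 32) | LO to form the 64-bit result.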
14851 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
14852 DAG.getConstant(32, MVT::i8));
14853 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
14854 Results.push_back(Chain);
14858 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
14859 SDValue Ops[] = { LO, HI };
14860 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
14861 Results.push_back(Pair);
14862 Results.push_back(Chain);
14865 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
14866 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
14867 // also used to custom lower READCYCLECOUNTER nodes.
14868 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
14869 SelectionDAG &DAG, const X86Subtarget *Subtarget,
14870 SmallVectorImpl<SDValue> &Results) {
14871 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
14872 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
14875 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
14876 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
14877 // and the EAX register is loaded with the low-order 32 bits.
14878 if (Subtarget->is64Bit()) {
14879 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
14880 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
14883 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
14884 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
14887 SDValue Chain = HI.getValue(1);
14889 if (Opcode == X86ISD::RDTSCP_DAG) {
14890 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
14892 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
14893 // the ECX register. Add 'ecx' explicitly to the chain.
14894 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
14896 // Explicitly store the content of ECX at the location passed in input
14897 // to the 'rdtscp' intrinsic.
14898 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
14899 MachinePointerInfo(), false, false, 0);
14902 if (Subtarget->is64Bit()) {
14903 // The EDX register is loaded with the high-order 32 bits of the MSR, and
14904 // the EAX register is loaded with the low-order 32 bits.
14905 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
14906 DAG.getConstant(32, MVT::i8));
14907 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
14908 Results.push_back(Chain);
14912 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
14913 SDValue Ops[] = { LO, HI };
14914 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
14915 Results.push_back(Pair);
14916 Results.push_back(Chain);
14919 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
14920 SelectionDAG &DAG) {
14921 SmallVector<SDValue, 2> Results;
14923 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
14925 return DAG.getMergeValues(Results, DL);
14929 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
14930 SelectionDAG &DAG) {
14931 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
14933 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
14938 switch(IntrData->Type) {
14940 llvm_unreachable("Unknown Intrinsic Type");
14944 // Emit the node with the right value type.
14945 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
14946 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
14948 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
14949 // Otherwise return the value from Rand, which is always 0, casted to i32.
14950 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
14951 DAG.getConstant(1, Op->getValueType(1)),
14952 DAG.getConstant(X86::COND_B, MVT::i32),
14953 SDValue(Result.getNode(), 1) };
14954 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
14955 DAG.getVTList(Op->getValueType(1), MVT::Glue),
14958 // Return { result, isValid, chain }.
14959 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
14960 SDValue(Result.getNode(), 2));
14963 //gather(v1, mask, index, base, scale);
14964 SDValue Chain = Op.getOperand(0);
14965 SDValue Src = Op.getOperand(2);
14966 SDValue Base = Op.getOperand(3);
14967 SDValue Index = Op.getOperand(4);
14968 SDValue Mask = Op.getOperand(5);
14969 SDValue Scale = Op.getOperand(6);
14970 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
14974 //scatter(base, mask, index, v1, scale);
14975 SDValue Chain = Op.getOperand(0);
14976 SDValue Base = Op.getOperand(2);
14977 SDValue Mask = Op.getOperand(3);
14978 SDValue Index = Op.getOperand(4);
14979 SDValue Src = Op.getOperand(5);
14980 SDValue Scale = Op.getOperand(6);
14981 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
    SDValue Hint = Op.getOperand(6);
    unsigned HintVal;
    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
14989 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
14990 SDValue Chain = Op.getOperand(0);
14991 SDValue Mask = Op.getOperand(2);
14992 SDValue Index = Op.getOperand(3);
14993 SDValue Base = Op.getOperand(4);
14994 SDValue Scale = Op.getOperand(5);
14995 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
14997 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
14999 SmallVector<SDValue, 2> Results;
15000 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
15001 return DAG.getMergeValues(Results, dl);
15003 // Read Performance Monitoring Counters.
15005 SmallVector<SDValue, 2> Results;
15006 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
15007 return DAG.getMergeValues(Results, dl);
15009 // XTEST intrinsics.
15011 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
15012 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
15013 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15014 DAG.getConstant(X86::COND_NE, MVT::i8),
15016 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
15017 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
15018 Ret, SDValue(InTrans.getNode(), 1));
15022 SmallVector<SDValue, 2> Results;
15023 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
15024 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
15025 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
15026 DAG.getConstant(-1, MVT::i8));
15027 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
15028 Op.getOperand(4), GenCF.getValue(1));
15029 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
15030 Op.getOperand(5), MachinePointerInfo(),
15032 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15033 DAG.getConstant(X86::COND_B, MVT::i8),
15035 Results.push_back(SetCC);
15036 Results.push_back(Store);
15037 return DAG.getMergeValues(Results, dl);
15039 case COMPRESS_TO_MEM: {
15041 SDValue Mask = Op.getOperand(4);
15042 SDValue DataToCompress = Op.getOperand(3);
15043 SDValue Addr = Op.getOperand(2);
15044 SDValue Chain = Op.getOperand(0);
15046 if (isAllOnes(Mask)) // return just a store
15047 return DAG.getStore(Chain, dl, DataToCompress, Addr,
15048 MachinePointerInfo(), false, false, 0);
15050 EVT VT = DataToCompress.getValueType();
15051 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
15052 VT.getVectorNumElements());
15053 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
15054 Mask.getValueType().getSizeInBits());
15055 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
15056 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
15057 DAG.getIntPtrConstant(0));
15059 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
15060 DataToCompress, DAG.getUNDEF(VT));
15061 return DAG.getStore(Chain, dl, Compressed, Addr,
15062 MachinePointerInfo(), false, false, 0);
15064 case EXPAND_FROM_MEM: {
15066 SDValue Mask = Op.getOperand(4);
15067 SDValue PathThru = Op.getOperand(3);
15068 SDValue Addr = Op.getOperand(2);
15069 SDValue Chain = Op.getOperand(0);
15070 EVT VT = Op.getValueType();
15072 if (isAllOnes(Mask)) // return just a load
15073 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
15075 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
15076 VT.getVectorNumElements());
15077 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
15078 Mask.getValueType().getSizeInBits());
15079 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
15080 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
15081 DAG.getIntPtrConstant(0));
15083 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
15084 false, false, false, 0);
15086 SDValue Results[] = {
15087 DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand, PathThru),
15089 return DAG.getMergeValues(Results, dl);
15094 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
15095 SelectionDAG &DAG) const {
15096 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
15097 MFI->setReturnAddressIsTaken(true);
15099 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15102 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15104 EVT PtrVT = getPointerTy();
15107 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15108 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
15109 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
15110 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15111 DAG.getNode(ISD::ADD, dl, PtrVT,
15112 FrameAddr, Offset),
15113 MachinePointerInfo(), false, false, false, 0);
15116 // Just load the return address.
15117 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
15118 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15119 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
15122 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
15123 MachineFunction &MF = DAG.getMachineFunction();
15124 MachineFrameInfo *MFI = MF.getFrameInfo();
15125 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
15126 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
15127 EVT VT = Op.getValueType();
15129 MFI->setFrameAddressIsTaken(true);
15131 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
15132 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
15133 // is not possible to crawl up the stack without looking at the unwind codes
15135 int FrameAddrIndex = FuncInfo->getFAIndex();
15136 if (!FrameAddrIndex) {
15137 // Set up a frame object for the return address.
15138 unsigned SlotSize = RegInfo->getSlotSize();
15139 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
15140 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
15141 FuncInfo->setFAIndex(FrameAddrIndex);
15143 return DAG.getFrameIndex(FrameAddrIndex, VT);
15146 unsigned FrameReg =
15147 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
15148 SDLoc dl(Op); // FIXME probably not meaningful
15149 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15150 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
15151 (FrameReg == X86::EBP && VT == MVT::i32)) &&
15152 "Invalid Frame Register!");
15153 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
15155 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
15156 MachinePointerInfo(),
15157 false, false, false, 0);
15161 // FIXME? Maybe this could be a TableGen attribute on some registers and
15162 // this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                       .Case("esp", X86::ESP)
                       .Case("rsp", X86::RSP)
                       .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
15174 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
15175 SelectionDAG &DAG) const {
15176 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
15177 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
15180 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
15181 SDValue Chain = Op.getOperand(0);
15182 SDValue Offset = Op.getOperand(1);
15183 SDValue Handler = Op.getOperand(2);
15186 EVT PtrVT = getPointerTy();
15187 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
15188 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
15189 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
15190 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
15191 "Invalid Frame Register!");
15192 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
15193 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
15195 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
15196 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
15197 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
15198 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
15200 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
15202 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
15203 DAG.getRegister(StoreAddrReg, PtrVT));
15206 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
15207 SelectionDAG &DAG) const {
15209 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
15210 DAG.getVTList(MVT::i32, MVT::Other),
15211 Op.getOperand(0), Op.getOperand(1));
15214 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
15215 SelectionDAG &DAG) const {
15217 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
15218 Op.getOperand(0), Op.getOperand(1));
15221 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
15222 return Op.getOperand(0);
15225 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
15226 SelectionDAG &DAG) const {
15227 SDValue Root = Op.getOperand(0);
15228 SDValue Trmp = Op.getOperand(1); // trampoline
15229 SDValue FPtr = Op.getOperand(2); // nested function
15230 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
15233 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
15234 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
15236 if (Subtarget->is64Bit()) {
15237 SDValue OutChains[6];
15239 // Large code-model.
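    // For reference, the trampoline emitted below has this byte layout
    // (offsets match the MachinePointerInfo offsets used in the stores):
    //    0: REX.WB, 0xB8+r   movabsq <FPtr>, %r11   (imm64 at offset 2)
    //   10: REX.WB, 0xB8+r   movabsq <Nest>, %r10   (imm64 at offset 12)
    //   20: REX.WB, 0xFF /4  jmpq *%r11             (ModRM at offset 22)
    // This is an explanatory sketch derived from the code below, not a
    // normative encoding reference.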
15240 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
15241 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
15243 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
15244 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
15246 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
15248 // Load the pointer to the nested function into R11.
15249 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
15250 SDValue Addr = Trmp;
15251 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
15252 Addr, MachinePointerInfo(TrmpAddr),
15255 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
15256 DAG.getConstant(2, MVT::i64));
15257 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
15258 MachinePointerInfo(TrmpAddr, 2),
15261 // Load the 'nest' parameter value into R10.
15262 // R10 is specified in X86CallingConv.td
15263 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
15264 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
15265 DAG.getConstant(10, MVT::i64));
15266 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
15267 Addr, MachinePointerInfo(TrmpAddr, 10),
15270 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
15271 DAG.getConstant(12, MVT::i64));
15272 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
15273 MachinePointerInfo(TrmpAddr, 12),
15276 // Jump to the nested function.
15277 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
15278 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
15279 DAG.getConstant(20, MVT::i64));
15280 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
15281 Addr, MachinePointerInfo(TrmpAddr, 20),
15284 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
15285 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
15286 DAG.getConstant(22, MVT::i64));
15287 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
15288 MachinePointerInfo(TrmpAddr, 22),
15291 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
15293 const Function *Func =
15294 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
  CallingConv::ID CC = Func->getCallingConv();
  unsigned NestReg;
  switch (CC) {
  default: llvm_unreachable("Unsupported calling convention");
15301 case CallingConv::C:
15302 case CallingConv::X86_StdCall: {
15303 // Pass 'nest' parameter in ECX.
15304 // Must be kept in sync with X86CallingConv.td
15305 NestReg = X86::ECX;
15307 // Check that ECX wasn't needed by an 'inreg' parameter.
15308 FunctionType *FTy = Func->getFunctionType();
15309 const AttributeSet &Attrs = Func->getAttributes();
15311 if (!Attrs.isEmpty() && !Func->isVarArg()) {
      unsigned InRegCount = 0;
      unsigned Idx = 1;
15315 for (FunctionType::param_iterator I = FTy->param_begin(),
15316 E = FTy->param_end(); I != E; ++I, ++Idx)
15317 if (Attrs.hasAttribute(Idx, Attribute::InReg))
15318 // FIXME: should only count parameters that are lowered to integers.
15319 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
15321 if (InRegCount > 2) {
        report_fatal_error("Nest register in use - reduce number of inreg"
                           " parameters!");
15328 case CallingConv::X86_FastCall:
15329 case CallingConv::X86_ThisCall:
15330 case CallingConv::Fast:
15331 // Pass 'nest' parameter in EAX.
    // Must be kept in sync with X86CallingConv.td
    NestReg = X86::EAX;
    break;
  }
15337 SDValue OutChains[4];
15338 SDValue Addr, Disp;
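  // For reference, the 10-byte trampoline built below has this layout:
  //   0: 0xB8+r   movl <Nest>, %ecx/%eax   (imm32 at offset 1)
  //   5: 0xE9     jmp  <disp32>            (rel32 at offset 6)
  // where the displacement is FPtr - (Trmp + 10), i.e. relative to the end of
  // the trampoline. Explanatory sketch only, derived from the stores below.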
15340 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
15341 DAG.getConstant(10, MVT::i32));
15342 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
15344 // This is storing the opcode for MOV32ri.
15345 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
15346 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
15347 OutChains[0] = DAG.getStore(Root, dl,
15348 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
15349 Trmp, MachinePointerInfo(TrmpAddr),
15352 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
15353 DAG.getConstant(1, MVT::i32));
15354 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
15355 MachinePointerInfo(TrmpAddr, 1),
15358 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
15359 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
15360 DAG.getConstant(5, MVT::i32));
15361 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
15362 MachinePointerInfo(TrmpAddr, 5),
15365 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
15366 DAG.getConstant(6, MVT::i32));
15367 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
15368 MachinePointerInfo(TrmpAddr, 6),
15371 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
15375 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
15376 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest,  01 Round to -inf,
     10 Round to +inf,     11 Round to 0

   FLT_ROUNDS, on the other hand, expects the following:
     -1 Undefined, 0 Round to 0, 1 Round to nearest,
      2 Round to +inf, 3 Round to -inf

   To perform the conversion, we do:
     (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
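  // Spot-checking the formula above (values are the two RC bits of FPSR):
  //   RC=00 (nearest)  -> ((0|0)+1)&3 == 1
  //   RC=01 (-inf)     -> ((0|2)+1)&3 == 3
  //   RC=10 (+inf)     -> ((1|0)+1)&3 == 2
  //   RC=11 (to zero)  -> ((1|2)+1)&3 == 0
  // which matches the FLT_ROUNDS encoding listed above. (Explanatory note,
  // not part of the lowering.)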
15396 MachineFunction &MF = DAG.getMachineFunction();
15397 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
15398 unsigned StackAlignment = TFI.getStackAlignment();
15399 MVT VT = Op.getSimpleValueType();
15402 // Save FP Control Word to stack slot
15403 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
15404 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
15406 MachineMemOperand *MMO =
15407 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
15408 MachineMemOperand::MOStore, 2, 2);
15410 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
15411 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
15412 DAG.getVTList(MVT::Other),
15413 Ops, MVT::i16, MMO);
15415 // Load FP Control Word from stack slot
15416 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
15417 MachinePointerInfo(), false, false, false, 0);
  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
15442 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
15443 MVT VT = Op.getSimpleValueType();
15445 unsigned NumBits = VT.getSizeInBits();
15448 Op = Op.getOperand(0);
15449 if (VT == MVT::i8) {
15450 // Zero extend to i32 since there is not an i8 bsr.
15452 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
15455 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
15456 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
15457 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
15468 // Finally xor with NumBits-1.
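  // (Why the xor works: for a power-of-two NumBits, ctlz(x) == (NumBits-1) -
  // bsr(x), and since bsr(x) <= NumBits-1 the subtraction is equivalent to
  // bsr(x) ^ (NumBits-1). The CMOV above feeds in NumBits+NumBits-1 so that a
  // zero input comes out as NumBits after this xor. Explanatory note only.)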
15469 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
15472 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
15476 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
15477 MVT VT = Op.getSimpleValueType();
15479 unsigned NumBits = VT.getSizeInBits();
15482 Op = Op.getOperand(0);
15483 if (VT == MVT::i8) {
15484 // Zero extend to i32 since there is not an i8 bsr.
15486 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
15489 // Issue a bsr (scan bits in reverse).
15490 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
15491 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
15493 // And xor with NumBits-1.
15494 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
15497 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
15501 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
15502 MVT VT = Op.getSimpleValueType();
15503 unsigned NumBits = VT.getSizeInBits();
15505 Op = Op.getOperand(0);
15507 // Issue a bsf (scan bits forward) which also sets EFLAGS.
15508 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
15509 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
15521 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
15522 // ones, and then concatenate the result back.
15523 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
15524 MVT VT = Op.getSimpleValueType();
15526 assert(VT.is256BitVector() && VT.isInteger() &&
15527 "Unsupported value type for operation");
15529 unsigned NumElems = VT.getVectorNumElements();
15532 // Extract the LHS vectors
15533 SDValue LHS = Op.getOperand(0);
15534 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15535 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15537 // Extract the RHS vectors
15538 SDValue RHS = Op.getOperand(1);
15539 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15540 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15542 MVT EltVT = VT.getVectorElementType();
15543 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15545 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15546 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
15547 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
15550 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
15551 assert(Op.getSimpleValueType().is256BitVector() &&
15552 Op.getSimpleValueType().isInteger() &&
15553 "Only handle AVX 256-bit vector integer operation");
15554 return Lower256IntArith(Op, DAG);
15557 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
15558 assert(Op.getSimpleValueType().is256BitVector() &&
15559 Op.getSimpleValueType().isInteger() &&
15560 "Only handle AVX 256-bit vector integer operation");
15561 return Lower256IntArith(Op, DAG);
15564 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
15565 SelectionDAG &DAG) {
15567 MVT VT = Op.getSimpleValueType();
15569 // Decompose 256-bit ops into smaller 128-bit ops.
15570 if (VT.is256BitVector() && !Subtarget->hasInt256())
15571 return Lower256IntArith(Op, DAG);
15573 SDValue A = Op.getOperand(0);
15574 SDValue B = Op.getOperand(1);
15576 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
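    // Rough sketch of the dataflow for A = <a0,a1,a2,a3>, B = <b0,b1,b2,b3>:
    //   pmuludq A, B              -> <a0*b0, a2*b2>   (two i64 lanes)
    //   pmuludq shuf(A), shuf(B)  -> <a1*b1, a3*b3>   (two i64 lanes)
    //   shuffle low 32 bits back  -> <a0*b0, a1*b1, a2*b2, a3*b3> mod 2^32
    // (Explanatory comment; the code below is the authoritative version.)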
15577 if (VT == MVT::v4i32) {
15578 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
15579 "Should not custom lower when pmuldq is available!");
15581 // Extract the odd parts.
15582 static const int UnpackMask[] = { 1, -1, 3, -1 };
15583 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
15584 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
15586 // Multiply the even parts.
15587 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
15588 // Now multiply odd parts.
15589 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
15591 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
15592 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
15594 // Merge the two vectors back together with a shuffle. This expands into 2
15596 static const int ShufMask[] = { 0, 4, 2, 6 };
15597 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
15600 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
15601 "Only know how to lower V2I64/V4I64/V8I64 multiply");
15603 // Ahi = psrlqi(a, 32);
15604 // Bhi = psrlqi(b, 32);
15606 // AloBlo = pmuludq(a, b);
15607 // AloBhi = pmuludq(a, Bhi);
15608 // AhiBlo = pmuludq(Ahi, b);
15610 // AloBhi = psllqi(AloBhi, 32);
15611 // AhiBlo = psllqi(AhiBlo, 32);
15612 // return AloBlo + AloBhi + AhiBlo;
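  //
  // Why this works: writing a = Alo + 2^32*Ahi and b = Blo + 2^32*Bhi,
  //   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo) + 2^64*(Ahi*Bhi)
  // and the 2^64 term vanishes modulo 2^64, so only the three pmuludq
  // products above are needed. (Explanatory note.)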
15614 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
15615 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
15617 // Bit cast to 32-bit vectors for MULUDQ
15618 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
15619 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
15620 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
15621 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
15622 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
15623 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
15625 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
15626 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
15627 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
15629 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
15630 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
15632 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
15633 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
15636 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
15637 assert(Subtarget->isTargetWin64() && "Unexpected target");
15638 EVT VT = Op.getValueType();
15639 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
15640 "Unexpected return type for lowering");
15644 switch (Op->getOpcode()) {
15645 default: llvm_unreachable("Unexpected request for libcall!");
15646 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
15647 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
15648 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
15649 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
15650 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
15651 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
15655 SDValue InChain = DAG.getEntryNode();
15657 TargetLowering::ArgListTy Args;
15658 TargetLowering::ArgListEntry Entry;
15659 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
15660 EVT ArgVT = Op->getOperand(i).getValueType();
15661 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
15662 "Unexpected argument type for lowering");
15663 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
15664 Entry.Node = StackPtr;
15665 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
15667 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
15668 Entry.Ty = PointerType::get(ArgTy,0);
15669 Entry.isSExt = false;
15670 Entry.isZExt = false;
15671 Args.push_back(Entry);
15674 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
15677 TargetLowering::CallLoweringInfo CLI(DAG);
15678 CLI.setDebugLoc(dl).setChain(InChain)
15679 .setCallee(getLibcallCallingConv(LC),
15680 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
15681 Callee, std::move(Args), 0)
15682 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
15684 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
15685 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
15688 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
15689 SelectionDAG &DAG) {
15690 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
15691 EVT VT = Op0.getValueType();
15694 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
15695 (VT == MVT::v8i32 && Subtarget->hasInt256()));
15697 // PMULxD operations multiply each even value (starting at 0) of LHS with
15698 // the related value of RHS and produce a widen result.
15699 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
15700 // => <2 x i64> <ae|cg>
15702 // In other word, to have all the results, we need to perform two PMULxD:
15703 // 1. one with the even values.
15704 // 2. one with the odd values.
15705 // To achieve #2, with need to place the odd values at an even position.
15707 // Place the odd value at an even position (basically, shift all values 1
15708 // step to the left):
15709 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
15710 // <a|b|c|d> => <b|undef|d|undef>
15711 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
15712 // <e|f|g|h> => <f|undef|h|undef>
15713 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
15717 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
15718 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
15720 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
15721 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
15722 // => <2 x i64> <ae|cg>
15723 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
15724 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
15725 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
15726 // => <2 x i64> <bf|dh>
15727 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
15728 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
15730 // Shuffle it back into the right order.
15731 SDValue Highs, Lows;
15732 if (VT == MVT::v8i32) {
15733 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
15734 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
15735 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
15736 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
15738 const int HighMask[] = {1, 5, 3, 7};
15739 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
15740 const int LowMask[] = {0, 4, 2, 6};
15741 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
15744 // If we have a signed multiply but no PMULDQ fix up the high parts of a
15745 // unsigned multiply.
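  // The fixup below uses the identity (per 32-bit lane):
  //   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
  // where (x >> 31) & y computes "y if x is negative, else 0".
  // (Explanatory note on the code that follows.)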
15746 if (IsSigned && !Subtarget->hasSSE41()) {
15748 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
15749 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
15750 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
15751 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
15752 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
15754 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
15755 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
15760 SDValue Ops[] = {Lows, Highs};
15761 return DAG.getMergeValues(Ops, dl);
15764 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
15765 const X86Subtarget *Subtarget) {
15766 MVT VT = Op.getSimpleValueType();
15768 SDValue R = Op.getOperand(0);
15769 SDValue Amt = Op.getOperand(1);
15771 // Optimize shl/srl/sra with constant shift amount.
15772 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
15773 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
15774 uint64_t ShiftAmt = ShiftConst->getZExtValue();
15776 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
15777 (Subtarget->hasInt256() &&
15778 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
15779 (Subtarget->hasAVX512() &&
15780 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
15781 if (Op.getOpcode() == ISD::SHL)
15782 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
15784 if (Op.getOpcode() == ISD::SRL)
15785 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
15787 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
15788 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
15792 if (VT == MVT::v16i8) {
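        // Explanatory overview of the code below: SSE has no byte-granular
        // shifts, so each case shifts in 16-bit lanes and then repairs the
        // bytes:
        //   shl: psllw by amt, then mask off bits pulled in from the low byte
        //   srl: psrlw by amt, then mask off bits pulled in from the high byte
        //   sra: srl as above, then sign-extend via (x ^ m) - m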
15793 if (Op.getOpcode() == ISD::SHL) {
15794 // Make a large shift.
15795 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
15796 MVT::v8i16, R, ShiftAmt,
15798 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
15799 // Zero out the rightmost bits.
15800 SmallVector<SDValue, 16> V(16,
15801 DAG.getConstant(uint8_t(-1U << ShiftAmt),
15803 return DAG.getNode(ISD::AND, dl, VT, SHL,
15804 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
15806 if (Op.getOpcode() == ISD::SRL) {
15807 // Make a large shift.
15808 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
15809 MVT::v8i16, R, ShiftAmt,
15811 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
15812 // Zero out the leftmost bits.
15813 SmallVector<SDValue, 16> V(16,
15814 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
15816 return DAG.getNode(ISD::AND, dl, VT, SRL,
15817 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
15819 if (Op.getOpcode() == ISD::SRA) {
15820 if (ShiftAmt == 7) {
15821 // R s>> 7 === R s< 0
15822 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
15823 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
15826 // R s>> a === ((R u>> a) ^ m) - m
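          // (Here m = 0x80 >> a marks where the sign bit lands after the
          // logical shift; xor-then-subtract with m replicates that bit into
          // the upper a bits. For example, with a=2: (0xE0 u>> 2) = 0x38,
          // ^0x20 = 0x18, -0x20 = 0xF8, which is 0xE0 s>> 2. Explanatory
          // note only.)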
15827 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
15828 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
15830 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
15831 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
15832 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
15835 llvm_unreachable("Unknown shift opcode.");
15838 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
15839 if (Op.getOpcode() == ISD::SHL) {
15840 // Make a large shift.
15841 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
15842 MVT::v16i16, R, ShiftAmt,
15844 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
15845 // Zero out the rightmost bits.
15846 SmallVector<SDValue, 32> V(32,
15847 DAG.getConstant(uint8_t(-1U << ShiftAmt),
15849 return DAG.getNode(ISD::AND, dl, VT, SHL,
15850 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
15852 if (Op.getOpcode() == ISD::SRL) {
15853 // Make a large shift.
15854 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
15855 MVT::v16i16, R, ShiftAmt,
15857 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
15858 // Zero out the leftmost bits.
15859 SmallVector<SDValue, 32> V(32,
15860 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
15862 return DAG.getNode(ISD::AND, dl, VT, SRL,
15863 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
15865 if (Op.getOpcode() == ISD::SRA) {
15866 if (ShiftAmt == 7) {
15867 // R s>> 7 === R s< 0
15868 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
15869 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
15872 // R s>> a === ((R u>> a) ^ m) - m
15873 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
15874 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
15876 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
15877 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
15878 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
15881 llvm_unreachable("Unknown shift opcode.");
15886 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
15887 if (!Subtarget->is64Bit() &&
15888 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
15889 Amt.getOpcode() == ISD::BITCAST &&
15890 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
15891 Amt = Amt.getOperand(0);
15892 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
15893 VT.getVectorNumElements();
15894 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
15895 uint64_t ShiftAmt = 0;
15896 for (unsigned i = 0; i != Ratio; ++i) {
15897 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
15901 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
15903 // Check remaining shift amounts.
15904 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
15905 uint64_t ShAmt = 0;
15906 for (unsigned j = 0; j != Ratio; ++j) {
15907 ConstantSDNode *C =
15908 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
15912 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
15914 if (ShAmt != ShiftAmt)
15917 switch (Op.getOpcode()) {
15919 llvm_unreachable("Unknown shift opcode!");
15921 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
15924 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
15927 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
15935 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
15936 const X86Subtarget* Subtarget) {
15937 MVT VT = Op.getSimpleValueType();
15939 SDValue R = Op.getOperand(0);
15940 SDValue Amt = Op.getOperand(1);
15942 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
15943 VT == MVT::v4i32 || VT == MVT::v8i16 ||
15944 (Subtarget->hasInt256() &&
15945 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
15946 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
15947 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
    SDValue BaseShAmt;
    EVT EltVT = VT.getVectorElementType();
15951 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
15952 // Check if this build_vector node is doing a splat.
15953 // If so, then set BaseShAmt equal to the splat value.
15954 BaseShAmt = BV->getSplatValue();
15955 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
15956 BaseShAmt = SDValue();
15958 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
15959 Amt = Amt.getOperand(0);
15961 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
15962 if (SVN && SVN->isSplat()) {
15963 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
15964 SDValue InVec = Amt.getOperand(0);
15965 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
15966 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
15967 "Unexpected shuffle index found!");
15968 BaseShAmt = InVec.getOperand(SplatIdx);
15969 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
15970 if (ConstantSDNode *C =
15971 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
15972 if (C->getZExtValue() == SplatIdx)
15973 BaseShAmt = InVec.getOperand(1);
15978 // Avoid introducing an extract element from a shuffle.
15979 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
15980 DAG.getIntPtrConstant(SplatIdx));
15984 if (BaseShAmt.getNode()) {
15985 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
15986 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
15987 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
15988 else if (EltVT.bitsLT(MVT::i32))
15989 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
15991 switch (Op.getOpcode()) {
15993 llvm_unreachable("Unknown shift opcode!");
15995 switch (VT.SimpleTy) {
15996 default: return SDValue();
16005 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
16008 switch (VT.SimpleTy) {
16009 default: return SDValue();
16016 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
16019 switch (VT.SimpleTy) {
16020 default: return SDValue();
16029 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
16035 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
16036 if (!Subtarget->is64Bit() &&
16037 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
16038 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
16039 Amt.getOpcode() == ISD::BITCAST &&
16040 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
16041 Amt = Amt.getOperand(0);
16042 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
16043 VT.getVectorNumElements();
16044 std::vector<SDValue> Vals(Ratio);
16045 for (unsigned i = 0; i != Ratio; ++i)
16046 Vals[i] = Amt.getOperand(i);
16047 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
16048 for (unsigned j = 0; j != Ratio; ++j)
16049 if (Vals[j] != Amt.getOperand(i + j))
16052 switch (Op.getOpcode()) {
16054 llvm_unreachable("Unknown shift opcode!");
16056 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
16058 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
16060 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
16067 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
16068 SelectionDAG &DAG) {
16069 MVT VT = Op.getSimpleValueType();
16071 SDValue R = Op.getOperand(0);
16072 SDValue Amt = Op.getOperand(1);
16075 assert(VT.isVector() && "Custom lowering only for vector shifts!");
16076 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
16078 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
16082 V = LowerScalarVariableShift(Op, DAG, Subtarget);
16086 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
16088 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
16089 if (Subtarget->hasInt256()) {
16090 if (Op.getOpcode() == ISD::SRL &&
16091 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
16092 VT == MVT::v4i64 || VT == MVT::v8i32))
16094 if (Op.getOpcode() == ISD::SHL &&
16095 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
16096 VT == MVT::v4i64 || VT == MVT::v8i32))
16098 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
16102 // If possible, lower this packed shift into a vector multiply instead of
16103 // expanding it into a sequence of scalar shifts.
16104 // Do this only if the vector shift count is a constant build_vector.
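  // For example (assuming the types involved are legal), the node
  //   (shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>)
  // becomes
  //   (mul <4 x i32> %x, <i32 2, i32 4, i32 8, i32 16>)
  // since shifting left by c is multiplication by 2^c. Undef and
  // out-of-range amounts are handled below.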
16105 if (Op.getOpcode() == ISD::SHL &&
16106 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
16107 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
16108 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
16109 SmallVector<SDValue, 8> Elts;
16110 EVT SVT = VT.getScalarType();
16111 unsigned SVTBits = SVT.getSizeInBits();
16112 const APInt &One = APInt(SVTBits, 1);
16113 unsigned NumElems = VT.getVectorNumElements();
16115 for (unsigned i=0; i !=NumElems; ++i) {
16116 SDValue Op = Amt->getOperand(i);
16117 if (Op->getOpcode() == ISD::UNDEF) {
16118 Elts.push_back(Op);
16122 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
16123 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
16124 uint64_t ShAmt = C.getZExtValue();
16125 if (ShAmt >= SVTBits) {
16126 Elts.push_back(DAG.getUNDEF(SVT));
16129 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
16131 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
16132 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
16135 // Lower SHL with variable shift amount.
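  // Explanatory note on the v4i32 sequence below: it builds 2^amt per lane
  // without a variable shift by placing amt in the exponent field of an IEEE
  // float (amt << 23, added to the bit pattern 0x3f800000 of 1.0f), which
  // yields the float 2^amt; converting back to integer and multiplying by R
  // then performs the shift.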
16136 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
16137 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
16139 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
16140 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
16141 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
16142 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
16145 // If possible, lower this shift as a sequence of two shifts by
16146 // constant plus a MOVSS/MOVSD instead of scalarizing it.
16148 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
16150 // Could be rewritten as:
16151 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
  // the vector shift into four scalar shifts plus four pairs of vector
  // insert/extract.
16157 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
16158 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
16159 unsigned TargetOpcode = X86ISD::MOVSS;
16160 bool CanBeSimplified;
16161 // The splat value for the first packed shift (the 'X' from the example).
16162 SDValue Amt1 = Amt->getOperand(0);
16163 // The splat value for the second packed shift (the 'Y' from the example).
16164 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
16165 Amt->getOperand(2);
16167 // See if it is possible to replace this node with a sequence of
16168 // two shifts followed by a MOVSS/MOVSD
16169 if (VT == MVT::v4i32) {
16170 // Check if it is legal to use a MOVSS.
16171 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
16172 Amt2 == Amt->getOperand(3);
16173 if (!CanBeSimplified) {
16174 // Otherwise, check if we can still simplify this node using a MOVSD.
16175 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
16176 Amt->getOperand(2) == Amt->getOperand(3);
16177 TargetOpcode = X86ISD::MOVSD;
16178 Amt2 = Amt->getOperand(2);
16181 // Do similar checks for the case where the machine value type
16183 CanBeSimplified = Amt1 == Amt->getOperand(1);
16184 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
16185 CanBeSimplified = Amt2 == Amt->getOperand(i);
16187 if (!CanBeSimplified) {
16188 TargetOpcode = X86ISD::MOVSD;
16189 CanBeSimplified = true;
16190 Amt2 = Amt->getOperand(4);
16191 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
16192 CanBeSimplified = Amt1 == Amt->getOperand(i);
16193 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
16194 CanBeSimplified = Amt2 == Amt->getOperand(j);
16198 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
16199 isa<ConstantSDNode>(Amt2)) {
16200 // Replace this node with two shifts followed by a MOVSS/MOVSD.
16201 EVT CastVT = MVT::v4i32;
16203 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
16204 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
16206 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
16207 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
16208 if (TargetOpcode == X86ISD::MOVSD)
16209 CastVT = MVT::v2i64;
16210 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
16211 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
16212 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
16214 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16218 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
16219 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
16222 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
16223 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
16225 // Turn 'a' into a mask suitable for VSELECT
16226 SDValue VSelM = DAG.getConstant(0x80, VT);
16227 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
16228 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
16230 SDValue CM1 = DAG.getConstant(0x0f, VT);
16231 SDValue CM2 = DAG.getConstant(0x3f, VT);
16233 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
16234 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
16235 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
16236 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
16237 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
16240 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
16241 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
16242 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
16244 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
16245 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
16246 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
16247 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
16248 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
16251 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
16252 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
16253 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
16255 // return VSELECT(r, r+r, a);
16256 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
16257 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
16261 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
16262 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
16263 // solution better.
16264 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
16265 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
16267 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16268 R = DAG.getNode(ExtOpc, dl, NewVT, R);
16269 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
16270 return DAG.getNode(ISD::TRUNCATE, dl, VT,
16271 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
16274 // Decompose 256-bit shifts into smaller 128-bit shifts.
16275 if (VT.is256BitVector()) {
16276 unsigned NumElems = VT.getVectorNumElements();
16277 MVT EltVT = VT.getVectorElementType();
16278 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
16280 // Extract the two vectors
16281 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
16282 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
16284 // Recreate the shift amount vectors
16285 SDValue Amt1, Amt2;
16286 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
16287 // Constant shift amount
16288 SmallVector<SDValue, 4> Amt1Csts;
16289 SmallVector<SDValue, 4> Amt2Csts;
16290 for (unsigned i = 0; i != NumElems/2; ++i)
16291 Amt1Csts.push_back(Amt->getOperand(i));
16292 for (unsigned i = NumElems/2; i != NumElems; ++i)
16293 Amt2Csts.push_back(Amt->getOperand(i));
16295 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
16296 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
16298 // Variable shift amount
16299 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
16300 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
16303 // Issue new vector shifts for the smaller types
16304 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
16305 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
16307 // Concatenate the result back
16308 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
16314 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
16315 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
16316 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
16317 // looks for this combo and may remove the "setcc" instruction if the "setcc"
16318 // has only one use.
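  // For example, (uaddo %a, %b) becomes an X86ISD::ADD that also produces
  // EFLAGS, plus a SETCC on X86::COND_B (carry) for the overflow result;
  // saddo uses COND_O instead. (Illustrative summary of the switch below.)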
16319 SDNode *N = Op.getNode();
16320 SDValue LHS = N->getOperand(0);
16321 SDValue RHS = N->getOperand(1);
16322 unsigned BaseOp = 0;
16325 switch (Op.getOpcode()) {
16326 default: llvm_unreachable("Unknown ovf instruction!");
16328 // A subtract of one will be selected as a INC. Note that INC doesn't
16329 // set CF, so we can't do this for UADDO.
16330 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16332 BaseOp = X86ISD::INC;
16333 Cond = X86::COND_O;
16336 BaseOp = X86ISD::ADD;
16337 Cond = X86::COND_O;
16340 BaseOp = X86ISD::ADD;
16341 Cond = X86::COND_B;
16344 // A subtract of one will be selected as a DEC. Note that DEC doesn't
16345 // set CF, so we can't do this for USUBO.
16346 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16348 BaseOp = X86ISD::DEC;
16349 Cond = X86::COND_O;
16352 BaseOp = X86ISD::SUB;
16353 Cond = X86::COND_O;
16356 BaseOp = X86ISD::SUB;
16357 Cond = X86::COND_B;
16360 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
16361 Cond = X86::COND_O;
16363 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
16364 if (N->getValueType(0) == MVT::i8) {
16365 BaseOp = X86ISD::UMUL8;
16366 Cond = X86::COND_O;
16369 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
16371 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
16374 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
16375 DAG.getConstant(X86::COND_O, MVT::i32),
16376 SDValue(Sum.getNode(), 2));
16378 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
16382 // Also sets EFLAGS.
16383 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
16384 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
16387 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
16388 DAG.getConstant(Cond, MVT::i32),
16389 SDValue(Sum.getNode(), 1));
16391 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
16394 // Sign extension of the low part of vector elements. This may be used either
16395 // when sign extend instructions are not available or if the vector element
16396 // sizes already match the sign-extended size. If the vector elements are in
16397 // their pre-extended size and sign extend instructions are available, that will
16398 // be handled by LowerSIGN_EXTEND.
16399 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
16400 SelectionDAG &DAG) const {
16402 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
16403 MVT VT = Op.getSimpleValueType();
16405 if (!Subtarget->hasSSE2() || !VT.isVector())
16408 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
16409 ExtraVT.getScalarType().getSizeInBits();
16411 switch (VT.SimpleTy) {
16412 default: return SDValue();
16415 if (!Subtarget->hasFp256())
16417 if (!Subtarget->hasInt256()) {
16418 // needs to be split
16419 unsigned NumElems = VT.getVectorNumElements();
16421 // Extract the LHS vectors
16422 SDValue LHS = Op.getOperand(0);
16423 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
16424 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
16426 MVT EltVT = VT.getVectorElementType();
16427 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
16429 EVT ExtraEltVT = ExtraVT.getVectorElementType();
16430 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
16431 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
16433 SDValue Extra = DAG.getValueType(ExtraVT);
16435 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
16436 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
16438 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
16443 SDValue Op0 = Op.getOperand(0);
16445 // This is a sign extension of some low part of vector elements without
16446 // changing the size of the vector elements themselves:
16447 // Shift-Left + Shift-Right-Algebraic.
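  // E.g. a sign_extend_inreg of i16 within i32 lanes becomes
  //   (sra (shl x, 16), 16)
  // with BitsDiff == 16 computed above. (Explanatory example.)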
16448 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
16450 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
16456 /// Returns true if the operand type is exactly twice the native width, and
16457 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
16458 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
16459 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
16460 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
16461 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
16464 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
16465 else if (OpWidth == 128)
16466 return Subtarget->hasCmpxchg16b();
16471 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
16472 return needsCmpXchgNb(SI->getValueOperand()->getType());
16475 // Note: this turns large loads into lock cmpxchg8b/16b.
16476 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
16477 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
16478 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
16479 return needsCmpXchgNb(PTy->getElementType());
16482 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
16483 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
16484 const Type *MemType = AI->getType();
16486 // If the operand is too big, we must see if cmpxchg8/16b is available
16487 // and default to library calls otherwise.
16488 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
16489 return needsCmpXchgNb(MemType);
16491 AtomicRMWInst::BinOp Op = AI->getOperation();
16494 llvm_unreachable("Unknown atomic operation");
16495 case AtomicRMWInst::Xchg:
16496 case AtomicRMWInst::Add:
16497 case AtomicRMWInst::Sub:
16498 // It's better to use xadd, xsub or xchg for these in all cases.
16500 case AtomicRMWInst::Or:
16501 case AtomicRMWInst::And:
16502 case AtomicRMWInst::Xor:
16503 // If the atomicrmw's result isn't actually used, we can just add a "lock"
16504 // prefix to a normal instruction for these operations.
16505 return !AI->use_empty();
16506 case AtomicRMWInst::Nand:
16507 case AtomicRMWInst::Max:
16508 case AtomicRMWInst::Min:
16509 case AtomicRMWInst::UMax:
16510 case AtomicRMWInst::UMin:
16511 // These always require a non-trivial set of data operations on x86. We must
16512 // use a cmpxchg loop.
16517 static bool hasMFENCE(const X86Subtarget& Subtarget) {
  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  // no-sse2). There isn't any reason to disable it if the target processor
  // supports it.
  return Subtarget.hasSSE2() || Subtarget.is64Bit();
}

LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
16526 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
16527 const Type *MemType = AI->getType();
16528 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
16529 // there is no benefit in turning such RMWs into loads, and it is actually
16530 // harmful as it introduces a mfence.
16531 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
16534 auto Builder = IRBuilder<>(AI);
16535 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
16536 auto SynchScope = AI->getSynchScope();
16537 // We must restrict the ordering to avoid generating loads with Release or
16538 // ReleaseAcquire orderings.
16539 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
16540 auto Ptr = AI->getPointerOperand();
16542 // Before the load we need a fence. Here is an example lifted from
16543 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
16546 // x.store(1, relaxed);
16547 // r1 = y.fetch_add(0, release);
16549 // y.fetch_add(42, acquire);
16550 // r2 = x.load(relaxed);
16551 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
16552 // lowered to just a load without a fence. A mfence flushes the store buffer,
16553 // making the optimization clearly correct.
16554 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
16555 // otherwise, we might be able to be more agressive on relaxed idempotent
16556 // rmw. In practice, they do not look useful, so we don't try to be
16557 // especially clever.
16558 if (SynchScope == SingleThread) {
16559 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
16560 // the IR level, so we must wrap it in an intrinsic.
16562 } else if (hasMFENCE(*Subtarget)) {
16563 Function *MFence = llvm::Intrinsic::getDeclaration(M,
16564 Intrinsic::x86_sse2_mfence);
    Builder.CreateCall(MFence);
  } else {
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;
  }
16574 // Finally we can emit the atomic load.
16575 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
16576 AI->getType()->getPrimitiveSizeInBits());
16577 Loaded->setAtomic(Order, SynchScope);
16578 AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
16583 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
16584 SelectionDAG &DAG) {
16586 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
16587 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
16588 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
16589 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
16591 // The only fence that needs an instruction is a sequentially-consistent
16592 // cross-thread fence.
16593 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
16594 if (hasMFENCE(*Subtarget))
16595 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
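    // Without MFENCE (32-bit targets lacking SSE2), fall back to a
    // LOCK-prefixed read-modify-write of the top of the stack:
    // "lock orl $0, (%esp)" acts as a full barrier. The operand list below
    // describes that memory operand. (Explanatory note on the fallback path.)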
16597 SDValue Chain = Op.getOperand(0);
16598 SDValue Zero = DAG.getConstant(0, MVT::i32);
16600 DAG.getRegister(X86::ESP, MVT::i32), // Base
16601 DAG.getTargetConstant(1, MVT::i8), // Scale
16602 DAG.getRegister(0, MVT::i32), // Index
16603 DAG.getTargetConstant(0, MVT::i32), // Disp
16604 DAG.getRegister(0, MVT::i32), // Segment.
16608 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
16609 return SDValue(Res, 0);
16612 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
16613 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
16616 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
16617 SelectionDAG &DAG) {
16618 MVT T = Op.getSimpleValueType();
16622 switch(T.SimpleTy) {
16623 default: llvm_unreachable("Invalid value type!");
16624 case MVT::i8: Reg = X86::AL; size = 1; break;
16625 case MVT::i16: Reg = X86::AX; size = 2; break;
16626 case MVT::i32: Reg = X86::EAX; size = 4; break;
16628 assert(Subtarget->is64Bit() && "Node not type legal!");
16629 Reg = X86::RAX; size = 8;
16632 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
16633 Op.getOperand(2), SDValue());
16634 SDValue Ops[] = { cpIn.getValue(0),
16637 DAG.getTargetConstant(size, MVT::i8),
16638 cpIn.getValue(1) };
16639 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
16640 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
16641 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
16645 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
16646 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
16647 MVT::i32, cpOut.getValue(2));
16648 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
16649 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
16651 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
16652 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
16653 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
16657 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
16658 SelectionDAG &DAG) {
16659 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
16660 MVT DstVT = Op.getSimpleValueType();
16662 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
16663 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
16664 if (DstVT != MVT::f64)
      // This conversion needs to be expanded.
      return SDValue();
16668 SDValue InVec = Op->getOperand(0);
16670 unsigned NumElts = SrcVT.getVectorNumElements();
16671 EVT SVT = SrcVT.getVectorElementType();
16673 // Widen the vector in input in the case of MVT::v2i32.
16674 // Example: from MVT::v2i32 to MVT::v4i32.
16675 SmallVector<SDValue, 16> Elts;
16676 for (unsigned i = 0, e = NumElts; i != e; ++i)
16677 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
16678 DAG.getIntPtrConstant(i)));
16680 // Explicitly mark the extra elements as Undef.
16681 Elts.append(NumElts, DAG.getUNDEF(SVT));
16683 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
16684 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
16685 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
16686 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
16687 DAG.getIntPtrConstant(0));
16690 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
16691 Subtarget->hasMMX() && "Unexpected custom BITCAST");
16692 assert((DstVT == MVT::i64 ||
16693 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
16694 "Unexpected custom BITCAST");
16695 // i64 <=> MMX conversions are Legal.
16696 if (SrcVT==MVT::i64 && DstVT.isVector())
16698 if (DstVT==MVT::i64 && SrcVT.isVector())
16700 // MMX <=> MMX conversions are Legal.
16701 if (SrcVT.isVector() && DstVT.isVector())
16703 // All other conversions need to be expanded.
16707 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
16708 SelectionDAG &DAG) {
16709 SDNode *Node = Op.getNode();
16712 Op = Op.getOperand(0);
16713 EVT VT = Op.getValueType();
16714 assert((VT.is128BitVector() || VT.is256BitVector()) &&
16715 "CTPOP lowering only implemented for 128/256-bit wide vector types");
16717 unsigned NumElts = VT.getVectorNumElements();
16718 EVT EltVT = VT.getVectorElementType();
16719 unsigned Len = EltVT.getSizeInBits();
16721 // This is the vectorized version of the "best" algorithm from
16722 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
16723 // with a minor tweak to use a series of adds + shifts instead of vector
16724 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
16726 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
16727 // v8i32 => Always profitable
16729 // FIXME: There a couple of possible improvements:
16731 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
16732 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
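  //
  // For reference, the scalar form of the algorithm below (for a 32-bit v) is:
  //   v = v - ((v >> 1) & 0x55555555);
  //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
  //   v = (v + (v >> 4)) & 0x0F0F0F0F;
  //   count = (v * 0x01010101) >> 24;
  // and the final multiply is what gets replaced by add+shift pairs here.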
16734 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
16735 "CTPOP not implemented for this vector element type.");
16737 // X86 canonicalize ANDs to vXi64, generate the appropriate bitcasts to avoid
16738 // extra legalization.
16739 bool NeedsBitcast = EltVT == MVT::i32;
16740 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
16742 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
16743 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
16744 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
16746 // v = v - ((v >> 1) & 0x55555555...)
16747 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
16748 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
16749 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
16751 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
16753 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
16754 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
16756 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
16758 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
16759 if (VT != And.getValueType())
16760 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
16761 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
16763 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
16764 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
16765 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
16766 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
16767 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
16769 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
16770 if (NeedsBitcast) {
16771 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
16772 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
16773 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
16776 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
16777 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
16778 if (VT != AndRHS.getValueType()) {
16779 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
16780 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
16782 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
16784 // v = (v + (v >> 4)) & 0x0F0F0F0F...
16785 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
16786 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
16787 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
16788 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
16790 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
16791 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
16792 if (NeedsBitcast) {
16793 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
16794 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
16796 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
16797 if (VT != And.getValueType())
16798 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
16800 // The algorithm mentioned above uses:
16801 // v = (v * 0x01010101...) >> (Len - 8)
16803 // Change it to use vector adds + vector shifts which yield faster results on
16804 // Haswell than using vector integer multiplication.
16806 // For i32 elements:
16807 // v = v + (v >> 8)
16808 // v = v + (v >> 16)
16810 // For i64 elements:
16811 // v = v + (v >> 8)
16812 // v = v + (v >> 16)
16813 // v = v + (v >> 32)
16816 SmallVector<SDValue, 8> Csts;
16817 for (unsigned i = 8; i <= Len/2; i *= 2) {
16818 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
16819 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
16820 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
16821 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
16825 // The result is on the least significant 6-bits on i32 and 7-bits on i64.
16826 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
16827 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
16828 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
16829 if (NeedsBitcast) {
16830 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
16831 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
16832 }
16833 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
16834 if (VT != And.getValueType())
16835 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
16836 return And;
16837 }
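/// LowerLOAD_SUB - Lower ATOMIC_LOAD_SUB by rewriting it as an ATOMIC_LOAD_ADD
/// of the negated operand (0 - RHS), reusing the original node's memory
/// operand, ordering and synchronization scope.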
16840 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
16841 SDNode *Node = Op.getNode();
16843 EVT T = Node->getValueType(0);
16844 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
16845 DAG.getConstant(0, T), Node->getOperand(2));
16846 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
16847 cast<AtomicSDNode>(Node)->getMemoryVT(),
16848 Node->getOperand(0),
16849 Node->getOperand(1), negOp,
16850 cast<AtomicSDNode>(Node)->getMemOperand(),
16851 cast<AtomicSDNode>(Node)->getOrdering(),
16852 cast<AtomicSDNode>(Node)->getSynchScope());
16853 }
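/// LowerATOMIC_STORE - Lower an atomic store. Sequentially consistent stores
/// and stores of types that are not legal for the target are lowered to an
/// ATOMIC_SWAP (xchg, or cmpxchg8b/cmpxchg16b for wide types); all other
/// atomic stores are matched by the ordinary store patterns.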
16855 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
16856 SDNode *Node = Op.getNode();
16858 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
16860 // Convert seq_cst store -> xchg
16861 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
16862 // FIXME: On 32-bit, store -> fist or movq would be more efficient
16863 // (The only way to get a 16-byte store is cmpxchg16b)
16864 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
16865 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
16866 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
16867 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
16868 cast<AtomicSDNode>(Node)->getMemoryVT(),
16869 Node->getOperand(0),
16870 Node->getOperand(1), Node->getOperand(2),
16871 cast<AtomicSDNode>(Node)->getMemOperand(),
16872 cast<AtomicSDNode>(Node)->getOrdering(),
16873 cast<AtomicSDNode>(Node)->getSynchScope());
16874 return Swap.getValue(1);
16876 // Other atomic stores have a simple pattern.
16877 return Op;
16878 }
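/// LowerADDC_ADDE_SUBC_SUBE - Lower carry-propagating add/sub nodes to the
/// corresponding X86 nodes (ADD/ADC/SUB/SBB), which model the carry via a
/// second i32 (EFLAGS) result.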
16880 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
16881 EVT VT = Op.getNode()->getSimpleValueType(0);
16883 // Let legalize expand this if it isn't a legal type yet.
16884 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
16885 return SDValue();
16887 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
16889 unsigned Opc;
16890 bool ExtraOp = false;
16891 switch (Op.getOpcode()) {
16892 default: llvm_unreachable("Invalid code");
16893 case ISD::ADDC: Opc = X86ISD::ADD; break;
16894 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
16895 case ISD::SUBC: Opc = X86ISD::SUB; break;
16896 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
16897 }
16899 if (!ExtraOp)
16900 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
16901 Op.getOperand(1));
16902 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
16903 Op.getOperand(1), Op.getOperand(2));
16904 }
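/// LowerFSINCOS - On Darwin x86-64, lower FSINCOS into a call to the
/// __sincos_stret / __sincosf_stret entry points, which return both results
/// at once (in XMM0/XMM1 for f64, packed into XMM0 for f32).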
16906 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
16907 SelectionDAG &DAG) {
16908 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
16910 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
16911 // which returns the values as { float, float } (in XMM0) or
16912 // { double, double } (which is returned in XMM0, XMM1).
16914 SDValue Arg = Op.getOperand(0);
16915 EVT ArgVT = Arg.getValueType();
16916 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16918 TargetLowering::ArgListTy Args;
16919 TargetLowering::ArgListEntry Entry;
16921 Entry.Node = Arg;
16922 Entry.Ty = ArgTy;
16923 Entry.isSExt = false;
16924 Entry.isZExt = false;
16925 Args.push_back(Entry);
16927 bool isF64 = ArgVT == MVT::f64;
16928 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
16929 // the small struct {f32, f32} is returned in (eax, edx). For f64,
16930 // the results are returned via SRet in memory.
16931 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
16932 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16933 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
16935 Type *RetTy = isF64
16936 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
16937 : (Type*)VectorType::get(ArgTy, 4);
16939 TargetLowering::CallLoweringInfo CLI(DAG);
16940 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
16941 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
16943 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
16945 if (isF64)
16946 // Returned in xmm0 and xmm1.
16947 return CallResult.first;
16949 // Returned in bits 0:31 and 32:63 of xmm0.
16950 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
16951 CallResult.first, DAG.getIntPtrConstant(0));
16952 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
16953 CallResult.first, DAG.getIntPtrConstant(1));
16954 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
16955 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
16958 /// LowerOperation - Provide custom lowering hooks for some operations.
16960 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
16961 switch (Op.getOpcode()) {
16962 default: llvm_unreachable("Should not custom lower this!");
16963 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
16964 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
16965 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
16966 return LowerCMP_SWAP(Op, Subtarget, DAG);
16967 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
16968 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
16969 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
16970 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
16971 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
16972 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
16973 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
16974 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
16975 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
16976 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
16977 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
16978 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
16979 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
16980 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
16981 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
16982 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
16983 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
16984 case ISD::SHL_PARTS:
16985 case ISD::SRA_PARTS:
16986 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
16987 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
16988 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
16989 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
16990 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
16991 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
16992 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
16993 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
16994 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
16995 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
16996 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
16997 case ISD::FABS:
16998 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
16999 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
17000 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
17001 case ISD::SETCC: return LowerSETCC(Op, DAG);
17002 case ISD::SELECT: return LowerSELECT(Op, DAG);
17003 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
17004 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
17005 case ISD::VASTART: return LowerVASTART(Op, DAG);
17006 case ISD::VAARG: return LowerVAARG(Op, DAG);
17007 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
17008 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
17009 case ISD::INTRINSIC_VOID:
17010 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
17011 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
17012 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
17013 case ISD::FRAME_TO_ARGS_OFFSET:
17014 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
17015 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
17016 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
17017 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
17018 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
17019 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
17020 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
17021 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
17022 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
17023 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
17024 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
17025 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
17026 case ISD::UMUL_LOHI:
17027 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
17028 case ISD::SRA:
17029 case ISD::SRL:
17030 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
17031 case ISD::SADDO:
17032 case ISD::UADDO:
17033 case ISD::SSUBO:
17034 case ISD::USUBO:
17035 case ISD::SMULO:
17036 case ISD::UMULO: return LowerXALUO(Op, DAG);
17037 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
17038 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
17039 case ISD::ADDC:
17040 case ISD::ADDE:
17041 case ISD::SUBC:
17042 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
17043 case ISD::ADD: return LowerADD(Op, DAG);
17044 case ISD::SUB: return LowerSUB(Op, DAG);
17045 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
17049 /// ReplaceNodeResults - Replace a node with an illegal result type
17050 /// with a new node built out of custom code.
17051 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
17052 SmallVectorImpl<SDValue>&Results,
17053 SelectionDAG &DAG) const {
17054 SDLoc dl(N);
17055 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17056 switch (N->getOpcode()) {
17057 default:
17058 llvm_unreachable("Do not know how to custom type legalize this operation!");
17059 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
17060 case X86ISD::FMINC:
17061 case X86ISD::FMIN:
17062 case X86ISD::FMAXC:
17063 case X86ISD::FMAX: {
17064 EVT VT = N->getValueType(0);
17065 if (VT != MVT::v2f32)
17066 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
17067 SDValue UNDEF = DAG.getUNDEF(VT);
17068 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
17069 N->getOperand(0), UNDEF);
17070 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
17071 N->getOperand(1), UNDEF);
17072 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
17073 return;
17074 }
17075 case ISD::SIGN_EXTEND_INREG:
17076 case ISD::ADDC:
17077 case ISD::ADDE:
17078 case ISD::SUBC:
17079 case ISD::SUBE:
17080 // We don't want to expand or promote these.
17081 return;
17082 case ISD::SDIV:
17083 case ISD::UDIV:
17084 case ISD::SREM:
17085 case ISD::UREM:
17086 case ISD::SDIVREM:
17087 case ISD::UDIVREM: {
17088 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
17089 Results.push_back(V);
17092 case ISD::FP_TO_SINT:
17093 case ISD::FP_TO_UINT: {
17094 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
17096 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
17097 return;
17099 std::pair<SDValue,SDValue> Vals =
17100 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
17101 SDValue FIST = Vals.first, StackSlot = Vals.second;
17102 if (FIST.getNode()) {
17103 EVT VT = N->getValueType(0);
17104 // Return a load from the stack slot.
17105 if (StackSlot.getNode())
17106 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
17107 MachinePointerInfo(),
17108 false, false, false, 0));
17109 else
17110 Results.push_back(FIST);
17114 case ISD::UINT_TO_FP: {
17115 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
17116 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
17117 N->getValueType(0) != MVT::v2f32)
17118 return;
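// The conversion below uses the classic double-precision trick: zero-extend
// each i32 lane to i64, OR in the bit pattern of 2^52 (0x4330000000000000) so
// the integer lands in the mantissa of the double 2^52 + x, subtract 2^52 to
// recover x as an f64, and finally round the pair down to v2f32.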
17119 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
17120 N->getOperand(0));
17121 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
17122 MVT::f64);
17123 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
17124 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
17125 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
17126 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
17127 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
17128 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
17129 return;
17130 }
17131 case ISD::FP_ROUND: {
17132 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
17133 return;
17134 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
17135 Results.push_back(V);
17138 case ISD::INTRINSIC_W_CHAIN: {
17139 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
17140 switch (IntNo) {
17141 default : llvm_unreachable("Do not know how to custom type "
17142 "legalize this intrinsic operation!");
17143 case Intrinsic::x86_rdtsc:
17144 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
17145 Results);
17146 case Intrinsic::x86_rdtscp:
17147 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
17148 Results);
17149 case Intrinsic::x86_rdpmc:
17150 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
17153 case ISD::READCYCLECOUNTER: {
17154 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
17155 Results);
17156 }
17157 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
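// i64 (on 32-bit) and i128 (on 64-bit) compare-and-swap are expanded to
// cmpxchg8b / cmpxchg16b: the expected value goes in EDX:EAX (RDX:RAX), the
// replacement value in ECX:EBX (RCX:RBX), the old value comes back in
// EDX:EAX (RDX:RAX), and ZF reports whether the exchange happened.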
17158 EVT T = N->getValueType(0);
17159 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
17160 bool Regs64bit = T == MVT::i128;
17161 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
17162 SDValue cpInL, cpInH;
17163 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
17164 DAG.getConstant(0, HalfT));
17165 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
17166 DAG.getConstant(1, HalfT));
17167 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
17168 Regs64bit ? X86::RAX : X86::EAX,
17169 cpInL, SDValue());
17170 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
17171 Regs64bit ? X86::RDX : X86::EDX,
17172 cpInH, cpInL.getValue(1));
17173 SDValue swapInL, swapInH;
17174 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
17175 DAG.getConstant(0, HalfT));
17176 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
17177 DAG.getConstant(1, HalfT));
17178 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
17179 Regs64bit ? X86::RBX : X86::EBX,
17180 swapInL, cpInH.getValue(1));
17181 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
17182 Regs64bit ? X86::RCX : X86::ECX,
17183 swapInH, swapInL.getValue(1));
17184 SDValue Ops[] = { swapInH.getValue(0),
17185 N->getOperand(1),
17186 swapInH.getValue(1) };
17187 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17188 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
17189 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
17190 X86ISD::LCMPXCHG8_DAG;
17191 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
17192 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
17193 Regs64bit ? X86::RAX : X86::EAX,
17194 HalfT, Result.getValue(1));
17195 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
17196 Regs64bit ? X86::RDX : X86::EDX,
17197 HalfT, cpOutL.getValue(2));
17198 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
17200 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
17201 MVT::i32, cpOutH.getValue(2));
17202 SDValue Success =
17203 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17204 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
17205 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
17207 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
17208 Results.push_back(Success);
17209 Results.push_back(EFLAGS.getValue(1));
17212 case ISD::ATOMIC_SWAP:
17213 case ISD::ATOMIC_LOAD_ADD:
17214 case ISD::ATOMIC_LOAD_SUB:
17215 case ISD::ATOMIC_LOAD_AND:
17216 case ISD::ATOMIC_LOAD_OR:
17217 case ISD::ATOMIC_LOAD_XOR:
17218 case ISD::ATOMIC_LOAD_NAND:
17219 case ISD::ATOMIC_LOAD_MIN:
17220 case ISD::ATOMIC_LOAD_MAX:
17221 case ISD::ATOMIC_LOAD_UMIN:
17222 case ISD::ATOMIC_LOAD_UMAX:
17223 case ISD::ATOMIC_LOAD: {
17224 // Delegate to generic TypeLegalization. Situations we can really handle
17225 // should have already been dealt with by AtomicExpandPass.cpp.
17226 break;
17227 }
17228 case ISD::BITCAST: {
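// Bitcasts from f64 to the MMX-sized integer vector types (v2i32 / v4i16 /
// v8i8) are expanded by going through a legal wide vector: scalar_to_vector
// to v2f64, bitcast to a twice-as-wide integer vector, then (unless the
// experimental widening legalization is enabled) extracting the low NumElts
// elements and rebuilding the requested vector.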
17229 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
17230 EVT DstVT = N->getValueType(0);
17231 EVT SrcVT = N->getOperand(0)->getValueType(0);
17233 if (SrcVT != MVT::f64 ||
17234 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
17235 return;
17237 unsigned NumElts = DstVT.getVectorNumElements();
17238 EVT SVT = DstVT.getVectorElementType();
17239 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
17240 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
17241 MVT::v2f64, N->getOperand(0));
17242 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
17244 if (ExperimentalVectorWideningLegalization) {
17245 // If we are legalizing vectors by widening, we already have the desired
17246 // legal vector type, just return it.
17247 Results.push_back(ToVecInt);
17248 return;
17249 }
17251 SmallVector<SDValue, 8> Elts;
17252 for (unsigned i = 0, e = NumElts; i != e; ++i)
17253 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
17254 ToVecInt, DAG.getIntPtrConstant(i)));
17256 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
17261 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
17262 switch (Opcode) {
17263 default: return nullptr;
17264 case X86ISD::BSF: return "X86ISD::BSF";
17265 case X86ISD::BSR: return "X86ISD::BSR";
17266 case X86ISD::SHLD: return "X86ISD::SHLD";
17267 case X86ISD::SHRD: return "X86ISD::SHRD";
17268 case X86ISD::FAND: return "X86ISD::FAND";
17269 case X86ISD::FANDN: return "X86ISD::FANDN";
17270 case X86ISD::FOR: return "X86ISD::FOR";
17271 case X86ISD::FXOR: return "X86ISD::FXOR";
17272 case X86ISD::FSRL: return "X86ISD::FSRL";
17273 case X86ISD::FILD: return "X86ISD::FILD";
17274 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
17275 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
17276 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
17277 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
17278 case X86ISD::FLD: return "X86ISD::FLD";
17279 case X86ISD::FST: return "X86ISD::FST";
17280 case X86ISD::CALL: return "X86ISD::CALL";
17281 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
17282 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
17283 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
17284 case X86ISD::BT: return "X86ISD::BT";
17285 case X86ISD::CMP: return "X86ISD::CMP";
17286 case X86ISD::COMI: return "X86ISD::COMI";
17287 case X86ISD::UCOMI: return "X86ISD::UCOMI";
17288 case X86ISD::CMPM: return "X86ISD::CMPM";
17289 case X86ISD::CMPMU: return "X86ISD::CMPMU";
17290 case X86ISD::SETCC: return "X86ISD::SETCC";
17291 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
17292 case X86ISD::FSETCC: return "X86ISD::FSETCC";
17293 case X86ISD::CMOV: return "X86ISD::CMOV";
17294 case X86ISD::BRCOND: return "X86ISD::BRCOND";
17295 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
17296 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
17297 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
17298 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
17299 case X86ISD::Wrapper: return "X86ISD::Wrapper";
17300 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
17301 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
17302 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
17303 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
17304 case X86ISD::PINSRB: return "X86ISD::PINSRB";
17305 case X86ISD::PINSRW: return "X86ISD::PINSRW";
17306 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
17307 case X86ISD::ANDNP: return "X86ISD::ANDNP";
17308 case X86ISD::PSIGN: return "X86ISD::PSIGN";
17309 case X86ISD::BLENDI: return "X86ISD::BLENDI";
17310 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
17311 case X86ISD::SUBUS: return "X86ISD::SUBUS";
17312 case X86ISD::HADD: return "X86ISD::HADD";
17313 case X86ISD::HSUB: return "X86ISD::HSUB";
17314 case X86ISD::FHADD: return "X86ISD::FHADD";
17315 case X86ISD::FHSUB: return "X86ISD::FHSUB";
17316 case X86ISD::UMAX: return "X86ISD::UMAX";
17317 case X86ISD::UMIN: return "X86ISD::UMIN";
17318 case X86ISD::SMAX: return "X86ISD::SMAX";
17319 case X86ISD::SMIN: return "X86ISD::SMIN";
17320 case X86ISD::FMAX: return "X86ISD::FMAX";
17321 case X86ISD::FMIN: return "X86ISD::FMIN";
17322 case X86ISD::FMAXC: return "X86ISD::FMAXC";
17323 case X86ISD::FMINC: return "X86ISD::FMINC";
17324 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
17325 case X86ISD::FRCP: return "X86ISD::FRCP";
17326 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
17327 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
17328 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
17329 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
17330 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
17331 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
17332 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
17333 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
17334 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
17335 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
17336 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
17337 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
17338 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
17339 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
17340 case X86ISD::VZEXT: return "X86ISD::VZEXT";
17341 case X86ISD::VSEXT: return "X86ISD::VSEXT";
17342 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
17343 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
17344 case X86ISD::VINSERT: return "X86ISD::VINSERT";
17345 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
17346 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
17347 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
17348 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
17349 case X86ISD::VSHL: return "X86ISD::VSHL";
17350 case X86ISD::VSRL: return "X86ISD::VSRL";
17351 case X86ISD::VSRA: return "X86ISD::VSRA";
17352 case X86ISD::VSHLI: return "X86ISD::VSHLI";
17353 case X86ISD::VSRLI: return "X86ISD::VSRLI";
17354 case X86ISD::VSRAI: return "X86ISD::VSRAI";
17355 case X86ISD::CMPP: return "X86ISD::CMPP";
17356 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
17357 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
17358 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
17359 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
17360 case X86ISD::ADD: return "X86ISD::ADD";
17361 case X86ISD::SUB: return "X86ISD::SUB";
17362 case X86ISD::ADC: return "X86ISD::ADC";
17363 case X86ISD::SBB: return "X86ISD::SBB";
17364 case X86ISD::SMUL: return "X86ISD::SMUL";
17365 case X86ISD::UMUL: return "X86ISD::UMUL";
17366 case X86ISD::SMUL8: return "X86ISD::SMUL8";
17367 case X86ISD::UMUL8: return "X86ISD::UMUL8";
17368 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
17369 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
17370 case X86ISD::INC: return "X86ISD::INC";
17371 case X86ISD::DEC: return "X86ISD::DEC";
17372 case X86ISD::OR: return "X86ISD::OR";
17373 case X86ISD::XOR: return "X86ISD::XOR";
17374 case X86ISD::AND: return "X86ISD::AND";
17375 case X86ISD::BEXTR: return "X86ISD::BEXTR";
17376 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
17377 case X86ISD::PTEST: return "X86ISD::PTEST";
17378 case X86ISD::TESTP: return "X86ISD::TESTP";
17379 case X86ISD::TESTM: return "X86ISD::TESTM";
17380 case X86ISD::TESTNM: return "X86ISD::TESTNM";
17381 case X86ISD::KORTEST: return "X86ISD::KORTEST";
17382 case X86ISD::PACKSS: return "X86ISD::PACKSS";
17383 case X86ISD::PACKUS: return "X86ISD::PACKUS";
17384 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
17385 case X86ISD::VALIGN: return "X86ISD::VALIGN";
17386 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
17387 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
17388 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
17389 case X86ISD::SHUFP: return "X86ISD::SHUFP";
17390 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
17391 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
17392 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
17393 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
17394 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
17395 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
17396 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
17397 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
17398 case X86ISD::MOVSD: return "X86ISD::MOVSD";
17399 case X86ISD::MOVSS: return "X86ISD::MOVSS";
17400 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
17401 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
17402 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
17403 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
17404 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
17405 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
17406 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
17407 case X86ISD::VPERMV: return "X86ISD::VPERMV";
17408 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
17409 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
17410 case X86ISD::VPERMI: return "X86ISD::VPERMI";
17411 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
17412 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
17413 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
17414 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
17415 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
17416 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
17417 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
17418 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
17419 case X86ISD::SAHF: return "X86ISD::SAHF";
17420 case X86ISD::RDRAND: return "X86ISD::RDRAND";
17421 case X86ISD::RDSEED: return "X86ISD::RDSEED";
17422 case X86ISD::FMADD: return "X86ISD::FMADD";
17423 case X86ISD::FMSUB: return "X86ISD::FMSUB";
17424 case X86ISD::FNMADD: return "X86ISD::FNMADD";
17425 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
17426 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
17427 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
17428 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
17429 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
17430 case X86ISD::XTEST: return "X86ISD::XTEST";
17431 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
17432 case X86ISD::EXPAND: return "X86ISD::EXPAND";
17433 case X86ISD::SELECT: return "X86ISD::SELECT";
17434 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
17435 case X86ISD::RCP28: return "X86ISD::RCP28";
17436 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
17437 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
17438 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
17439 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
17440 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
17444 // isLegalAddressingMode - Return true if the addressing mode represented
17445 // by AM is legal for this target, for a load/store of the specified type.
17446 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
17447 Type *Ty) const {
17448 // X86 supports extremely general addressing modes.
17449 CodeModel::Model M = getTargetMachine().getCodeModel();
17450 Reloc::Model R = getTargetMachine().getRelocationModel();
17452 // X86 allows a sign-extended 32-bit immediate field as a displacement.
17453 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
17454 return false;
17456 if (AM.BaseGV) {
17457 unsigned GVFlags =
17458 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
17460 // If a reference to this global requires an extra load, we can't fold it.
17461 if (isGlobalStubReference(GVFlags))
17462 return false;
17464 // If BaseGV requires a register for the PIC base, we cannot also have a
17465 // BaseReg specified.
17466 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
17467 return false;
17469 // If lower 4G is not available, then we must use rip-relative addressing.
17470 if ((M != CodeModel::Small || R != Reloc::Static) &&
17471 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
17472 return false;
17473 }
17475 switch (AM.Scale) {
17476 case 0:
17477 case 1:
17478 case 2:
17479 case 4:
17480 case 8:
17481 // These scales always work.
17482 break;
17483 case 3:
17484 case 5:
17485 case 9:
17486 // These scales are formed with basereg+scalereg. Only accept if there is
17487 // no basereg.
17488 if (AM.HasBaseReg)
17489 return false;
17490 break;
17491 default: // Other stuff never works.
17492 return false;
17493 }
17495 return true;
17496 }
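/// isVectorShiftByScalarCheap - Return true if a vector shift by a single,
/// splatted scalar amount is noticeably cheaper on this subtarget than a
/// fully general per-element shift.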
17498 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
17499 unsigned Bits = Ty->getScalarSizeInBits();
17501 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
17502 // particularly cheaper than those without.
17503 if (Bits == 8)
17504 return false;
17506 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
17507 // variable shifts just as cheap as scalar ones.
17508 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
17509 return false;
17511 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
17512 // fully general vector.
17513 return true;
17514 }
17516 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
17517 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
17518 return false;
17519 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
17520 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
17521 return NumBits1 > NumBits2;
17524 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
17525 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
17526 return false;
17528 if (!isTypeLegal(EVT::getEVT(Ty1)))
17529 return false;
17531 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
17533 // Assuming the caller doesn't have a zeroext or signext return parameter,
17534 // truncation all the way down to i1 is valid.
17535 return true;
17536 }
17538 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
17539 return isInt<32>(Imm);
17542 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
17543 // Can also use sub to handle negated immediates.
17544 return isInt<32>(Imm);
17547 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
17548 if (!VT1.isInteger() || !VT2.isInteger())
17549 return false;
17550 unsigned NumBits1 = VT1.getSizeInBits();
17551 unsigned NumBits2 = VT2.getSizeInBits();
17552 return NumBits1 > NumBits2;
17555 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
17556 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
17557 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
17560 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
17561 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
17562 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
17565 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
17566 EVT VT1 = Val.getValueType();
17567 if (isZExtFree(VT1, VT2))
17568 return true;
17570 if (Val.getOpcode() != ISD::LOAD)
17571 return false;
17573 if (!VT1.isSimple() || !VT1.isInteger() ||
17574 !VT2.isSimple() || !VT2.isInteger())
17575 return false;
17577 switch (VT1.getSimpleVT().SimpleTy) {
17578 default: break;
17579 case MVT::i8:
17580 case MVT::i16:
17581 case MVT::i32:
17582 // X86 has 8, 16, and 32-bit zero-extending loads.
17583 return true;
17584 }
17586 return false;
17587 }
17589 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
17591 bool
17592 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
17593 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
17594 return false;
17596 VT = VT.getScalarType();
17598 if (!VT.isSimple())
17599 return false;
17601 switch (VT.getSimpleVT().SimpleTy) {
17602 case MVT::f32:
17603 case MVT::f64:
17604 return true;
17605 default:
17606 break;
17607 }
17609 return false;
17610 }
17612 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
17613 // i16 instructions are longer (0x66 prefix) and potentially slower.
17614 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
17617 /// isShuffleMaskLegal - Targets can use this to indicate that they only
17618 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
17619 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
17620 /// are assumed to be legal.
17621 bool
17622 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
17623 EVT VT) const {
17624 if (!VT.isSimple())
17625 return false;
17627 // Very little shuffling can be done for 64-bit vectors right now.
17628 if (VT.getSizeInBits() == 64)
17629 return false;
17631 // We only care that the types being shuffled are legal. The lowering can
17632 // handle any possible shuffle mask that results.
17633 return isTypeLegal(VT.getSimpleVT());
17634 }
17636 bool
17637 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
17638 EVT VT) const {
17639 // Just delegate to the generic legality, clear masks aren't special.
17640 return isShuffleMaskLegal(Mask, VT);
17643 //===----------------------------------------------------------------------===//
17644 // X86 Scheduler Hooks
17645 //===----------------------------------------------------------------------===//
17647 /// Utility function to emit xbegin specifying the start of an RTM region.
17648 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
17649 const TargetInstrInfo *TII) {
17650 DebugLoc DL = MI->getDebugLoc();
17652 const BasicBlock *BB = MBB->getBasicBlock();
17653 MachineFunction::iterator I = MBB;
17654 ++I;
17656 // For the v = xbegin(), we generate
17657 //
17658 // thisMBB:
17659 //  xbegin sinkMBB
17660 //
17661 // mainMBB:
17662 //  eax = -1
17663 //
17664 // sinkMBB:
17665 //  v = eax
17667 MachineBasicBlock *thisMBB = MBB;
17668 MachineFunction *MF = MBB->getParent();
17669 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
17670 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
17671 MF->insert(I, mainMBB);
17672 MF->insert(I, sinkMBB);
17674 // Transfer the remainder of BB and its successor edges to sinkMBB.
17675 sinkMBB->splice(sinkMBB->begin(), MBB,
17676 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
17677 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
17679 // thisMBB:
17680 //  xbegin sinkMBB
17681 //  # fallthrough to mainMBB
17682 //  # on abort, branch to sinkMBB
17683 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
17684 thisMBB->addSuccessor(mainMBB);
17685 thisMBB->addSuccessor(sinkMBB);
17689 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
17690 mainMBB->addSuccessor(sinkMBB);
17693 // EAX is live into the sinkMBB
17694 sinkMBB->addLiveIn(X86::EAX);
17695 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
17696 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
17697 .addReg(X86::EAX);
17699 MI->eraseFromParent();
17703 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
17704 // or XMM0_V32I8 in AVX all of this code can be replaced with that
17705 // in the .td file.
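// EmitPCMPSTRM - Expand the PCMPxSTRM pseudos: emit the real (V)PCMPISTRM /
// (V)PCMPESTRM instruction with the pseudo's explicit operands and then copy
// the implicit XMM0 result into the pseudo's destination register.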
17706 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
17707 const TargetInstrInfo *TII) {
17708 unsigned Opc;
17709 switch (MI->getOpcode()) {
17710 default: llvm_unreachable("illegal opcode!");
17711 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
17712 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
17713 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
17714 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
17715 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
17716 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
17717 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
17718 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
17721 DebugLoc dl = MI->getDebugLoc();
17722 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
17724 unsigned NumArgs = MI->getNumOperands();
17725 for (unsigned i = 1; i < NumArgs; ++i) {
17726 MachineOperand &Op = MI->getOperand(i);
17727 if (!(Op.isReg() && Op.isImplicit()))
17728 MIB.addOperand(Op);
17730 if (MI->hasOneMemOperand())
17731 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
17733 BuildMI(*BB, MI, dl,
17734 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
17735 .addReg(X86::XMM0);
17737 MI->eraseFromParent();
17741 // FIXME: Custom handling because TableGen doesn't support multiple implicit
17742 // defs in an instruction pattern
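// EmitPCMPSTRI - Same expansion as above for the PCMPxSTRI pseudos, except
// that the index result is produced in ECX rather than XMM0.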
17743 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
17744 const TargetInstrInfo *TII) {
17745 unsigned Opc;
17746 switch (MI->getOpcode()) {
17747 default: llvm_unreachable("illegal opcode!");
17748 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
17749 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
17750 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
17751 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
17752 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
17753 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
17754 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
17755 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
17758 DebugLoc dl = MI->getDebugLoc();
17759 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
17761 unsigned NumArgs = MI->getNumOperands(); // remove the results
17762 for (unsigned i = 1; i < NumArgs; ++i) {
17763 MachineOperand &Op = MI->getOperand(i);
17764 if (!(Op.isReg() && Op.isImplicit()))
17765 MIB.addOperand(Op);
17767 if (MI->hasOneMemOperand())
17768 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
17770 BuildMI(*BB, MI, dl,
17771 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
17772 .addReg(X86::ECX);
17774 MI->eraseFromParent();
17775 return BB;
17776 }
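// EmitMonitor - Expand the MONITOR pseudo: materialize the address operand
// into RAX/EAX with an LEA, copy the two hint operands into ECX and EDX, and
// then emit the real MONITOR instruction, which reads those registers
// implicitly.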
17778 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
17779 const X86Subtarget *Subtarget) {
17780 DebugLoc dl = MI->getDebugLoc();
17781 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
17782 // Address into RAX/EAX, other two args into ECX, EDX.
17783 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
17784 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
17785 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
17786 for (int i = 0; i < X86::AddrNumOperands; ++i)
17787 MIB.addOperand(MI->getOperand(i));
17789 unsigned ValOps = X86::AddrNumOperands;
17790 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
17791 .addReg(MI->getOperand(ValOps).getReg());
17792 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
17793 .addReg(MI->getOperand(ValOps+1).getReg());
17795 // The instruction doesn't actually take any operands though.
17796 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
17798 MI->eraseFromParent(); // The pseudo is gone now.
17802 MachineBasicBlock *
17803 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
17804 MachineBasicBlock *MBB) const {
17805 // Emit va_arg instruction on X86-64.
17807 // Operands to this pseudo-instruction:
17808 // 0 ) Output : destination address (reg)
17809 // 1-5) Input : va_list address (addr, i64mem)
17810 // 6 ) ArgSize : Size (in bytes) of vararg type
17811 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
17812 // 8 ) Align : Alignment of type
17813 // 9 ) EFLAGS (implicit-def)
17815 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
17816 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
17818 unsigned DestReg = MI->getOperand(0).getReg();
17819 MachineOperand &Base = MI->getOperand(1);
17820 MachineOperand &Scale = MI->getOperand(2);
17821 MachineOperand &Index = MI->getOperand(3);
17822 MachineOperand &Disp = MI->getOperand(4);
17823 MachineOperand &Segment = MI->getOperand(5);
17824 unsigned ArgSize = MI->getOperand(6).getImm();
17825 unsigned ArgMode = MI->getOperand(7).getImm();
17826 unsigned Align = MI->getOperand(8).getImm();
17828 // Memory Reference
17829 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
17830 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
17831 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
17833 // Machine Information
17834 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
17835 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
17836 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
17837 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
17838 DebugLoc DL = MI->getDebugLoc();
17840 // struct va_list {
17841 //   i32   gp_offset
17842 //   i32   fp_offset
17843 //   i64   overflow_area (address)
17844 //   i64   reg_save_area (address)
17845 // }
17846 // sizeof(va_list) = 24
17847 // alignment(va_list) = 8
17849 unsigned TotalNumIntRegs = 6;
17850 unsigned TotalNumXMMRegs = 8;
17851 bool UseGPOffset = (ArgMode == 1);
17852 bool UseFPOffset = (ArgMode == 2);
17853 unsigned MaxOffset = TotalNumIntRegs * 8 +
17854 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
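// With 6 integer registers of 8 bytes and 8 XMM registers of 16 bytes in the
// register save area, MaxOffset is 6*8 = 48 when pulling from gp_offset and
// 6*8 + 8*16 = 176 when pulling from fp_offset.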
17856 /* Align ArgSize to a multiple of 8 */
17857 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
17858 bool NeedsAlign = (Align > 8);
17860 MachineBasicBlock *thisMBB = MBB;
17861 MachineBasicBlock *overflowMBB;
17862 MachineBasicBlock *offsetMBB;
17863 MachineBasicBlock *endMBB;
17865 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
17866 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
17867 unsigned OffsetReg = 0;
17869 if (!UseGPOffset && !UseFPOffset) {
17870 // If we only pull from the overflow region, we don't create a branch.
17871 // We don't need to alter control flow.
17872 OffsetDestReg = 0; // unused
17873 OverflowDestReg = DestReg;
17875 offsetMBB = nullptr;
17876 overflowMBB = thisMBB;
17879 // First emit code to check if gp_offset (or fp_offset) is below the bound.
17880 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
17881 // If not, pull from overflow_area. (branch to overflowMBB)
17882 //
17883 //         thisMBB
17884 //         /     \
17886 //   offsetMBB   overflowMBB
17888 //         \     /
17890 //         endMBB
17891 // Registers for the PHI in endMBB
17892 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
17893 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
17895 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
17896 MachineFunction *MF = MBB->getParent();
17897 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
17898 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
17899 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
17901 MachineFunction::iterator MBBIter = MBB;
17902 ++MBBIter;
17904 // Insert the new basic blocks
17905 MF->insert(MBBIter, offsetMBB);
17906 MF->insert(MBBIter, overflowMBB);
17907 MF->insert(MBBIter, endMBB);
17909 // Transfer the remainder of MBB and its successor edges to endMBB.
17910 endMBB->splice(endMBB->begin(), thisMBB,
17911 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
17912 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
17914 // Make offsetMBB and overflowMBB successors of thisMBB
17915 thisMBB->addSuccessor(offsetMBB);
17916 thisMBB->addSuccessor(overflowMBB);
17918 // endMBB is a successor of both offsetMBB and overflowMBB
17919 offsetMBB->addSuccessor(endMBB);
17920 overflowMBB->addSuccessor(endMBB);
17922 // Load the offset value into a register
17923 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
17924 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
17925 .addOperand(Base)
17926 .addOperand(Scale)
17927 .addOperand(Index)
17928 .addDisp(Disp, UseFPOffset ? 4 : 0)
17929 .addOperand(Segment)
17930 .setMemRefs(MMOBegin, MMOEnd);
17932 // Check if there is enough room left to pull this argument.
17933 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
17934 .addReg(OffsetReg)
17935 .addImm(MaxOffset + 8 - ArgSizeA8);
17937 // Branch to "overflowMBB" if offset >= max
17938 // Fall through to "offsetMBB" otherwise
17939 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
17940 .addMBB(overflowMBB);
17943 // In offsetMBB, emit code to use the reg_save_area.
17944 if (offsetMBB) {
17945 assert(OffsetReg != 0);
17947 // Read the reg_save_area address.
17948 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
17949 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
17950 .addOperand(Base)
17951 .addOperand(Scale)
17952 .addOperand(Index)
17953 .addDisp(Disp, 16)
17954 .addOperand(Segment)
17955 .setMemRefs(MMOBegin, MMOEnd);
17957 // Zero-extend the offset
17958 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
17959 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
17960 .addImm(0)
17961 .addReg(OffsetReg)
17962 .addImm(X86::sub_32bit);
17964 // Add the offset to the reg_save_area to get the final address.
17965 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
17966 .addReg(OffsetReg64)
17967 .addReg(RegSaveReg);
17969 // Compute the offset for the next argument
17970 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
17971 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
17972 .addReg(OffsetReg)
17973 .addImm(UseFPOffset ? 16 : 8);
17975 // Store it back into the va_list.
17976 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
17977 .addOperand(Base)
17978 .addOperand(Scale)
17979 .addOperand(Index)
17980 .addDisp(Disp, UseFPOffset ? 4 : 0)
17981 .addOperand(Segment)
17982 .addReg(NextOffsetReg)
17983 .setMemRefs(MMOBegin, MMOEnd);
17986 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
17987 .addMBB(endMBB);
17988 }
17991 // Emit code to use overflow area
17994 // Load the overflow_area address into a register.
17995 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
17996 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
17997 .addOperand(Base)
17998 .addOperand(Scale)
17999 .addOperand(Index)
18000 .addDisp(Disp, 8)
18001 .addOperand(Segment)
18002 .setMemRefs(MMOBegin, MMOEnd);
18004 // If we need to align it, do so. Otherwise, just copy the address
18005 // to OverflowDestReg.
18006 if (NeedsAlign) {
18007 // Align the overflow address
18008 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
18009 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
18011 // aligned_addr = (addr + (align-1)) & ~(align-1)
18012 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
18013 .addReg(OverflowAddrReg)
18014 .addImm(Align-1);
18016 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
18017 .addReg(TmpReg)
18018 .addImm(~(uint64_t)(Align-1));
18019 } else {
18020 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
18021 .addReg(OverflowAddrReg);
18022 }
18024 // Compute the next overflow address after this argument.
18025 // (the overflow address should be kept 8-byte aligned)
18026 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
18027 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
18028 .addReg(OverflowDestReg)
18029 .addImm(ArgSizeA8);
18031 // Store the new overflow address.
18032 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
18033 .addOperand(Base)
18034 .addOperand(Scale)
18035 .addOperand(Index)
18036 .addDisp(Disp, 8)
18037 .addOperand(Segment)
18038 .addReg(NextAddrReg)
18039 .setMemRefs(MMOBegin, MMOEnd);
18041 // If we branched, emit the PHI to the front of endMBB.
18042 if (offsetMBB) {
18043 BuildMI(*endMBB, endMBB->begin(), DL,
18044 TII->get(X86::PHI), DestReg)
18045 .addReg(OffsetDestReg).addMBB(offsetMBB)
18046 .addReg(OverflowDestReg).addMBB(overflowMBB);
18047 }
18049 // Erase the pseudo instruction
18050 MI->eraseFromParent();
18052 return endMBB;
18053 }
18055 MachineBasicBlock *
18056 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
18058 MachineBasicBlock *MBB) const {
18059 // Emit code to save XMM registers to the stack. The ABI says that the
18060 // number of registers to save is given in %al, so it's theoretically
18061 // possible to do an indirect jump trick to avoid saving all of them,
18062 // however this code takes a simpler approach and just executes all
18063 // of the stores if %al is non-zero. It's less code, and it's probably
18064 // easier on the hardware branch predictor, and stores aren't all that
18065 // expensive anyway.
18067 // Create the new basic blocks. One block contains all the XMM stores,
18068 // and one block is the final destination regardless of whether any
18069 // stores were performed.
18070 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
18071 MachineFunction *F = MBB->getParent();
18072 MachineFunction::iterator MBBIter = MBB;
18073 ++MBBIter;
18074 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
18075 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
18076 F->insert(MBBIter, XMMSaveMBB);
18077 F->insert(MBBIter, EndMBB);
18079 // Transfer the remainder of MBB and its successor edges to EndMBB.
18080 EndMBB->splice(EndMBB->begin(), MBB,
18081 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
18082 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
18084 // The original block will now fall through to the XMM save block.
18085 MBB->addSuccessor(XMMSaveMBB);
18086 // The XMMSaveMBB will fall through to the end block.
18087 XMMSaveMBB->addSuccessor(EndMBB);
18089 // Now add the instructions.
18090 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18091 DebugLoc DL = MI->getDebugLoc();
18093 unsigned CountReg = MI->getOperand(0).getReg();
18094 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
18095 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
18097 if (!Subtarget->isTargetWin64()) {
18098 // If %al is 0, branch around the XMM save block.
18099 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
18100 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
18101 MBB->addSuccessor(EndMBB);
18104 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
18105 // that was just emitted, but clearly shouldn't be "saved".
18106 assert((MI->getNumOperands() <= 3 ||
18107 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
18108 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
18109 && "Expected last argument to be EFLAGS");
18110 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
18111 // In the XMM save block, save all the XMM argument registers.
18112 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
18113 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
18114 MachineMemOperand *MMO =
18115 F->getMachineMemOperand(
18116 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
18117 MachineMemOperand::MOStore,
18118 /*Size=*/16, /*Align=*/16);
18119 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
18120 .addFrameIndex(RegSaveFrameIndex)
18121 .addImm(/*Scale=*/1)
18122 .addReg(/*IndexReg=*/0)
18123 .addImm(/*Disp=*/Offset)
18124 .addReg(/*Segment=*/0)
18125 .addReg(MI->getOperand(i).getReg())
18126 .addMemOperand(MMO);
18129 MI->eraseFromParent(); // The pseudo instruction is gone now.
18134 // The EFLAGS operand of SelectItr might be missing a kill marker
18135 // because there were multiple uses of EFLAGS, and ISel didn't know
18136 // which to mark. Figure out whether SelectItr should have had a
18137 // kill marker, and set it if it should. Returns the correct kill
18138 // marker value.
18139 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
18140 MachineBasicBlock* BB,
18141 const TargetRegisterInfo* TRI) {
18142 // Scan forward through BB for a use/def of EFLAGS.
18143 MachineBasicBlock::iterator miI(std::next(SelectItr));
18144 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
18145 const MachineInstr& mi = *miI;
18146 if (mi.readsRegister(X86::EFLAGS))
18147 return false;
18148 if (mi.definesRegister(X86::EFLAGS))
18149 break; // Should have kill-flag - update below.
18152 // If we hit the end of the block, check whether EFLAGS is live into a
18153 // successor.
18154 if (miI == BB->end()) {
18155 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
18156 sEnd = BB->succ_end();
18157 sItr != sEnd; ++sItr) {
18158 MachineBasicBlock* succ = *sItr;
18159 if (succ->isLiveIn(X86::EFLAGS))
18160 return false;
18161 }
18162 }
18164 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
18165 // out. SelectMI should have a kill flag on EFLAGS.
18166 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
18167 return true;
18168 }
18170 MachineBasicBlock *
18171 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
18172 MachineBasicBlock *BB) const {
18173 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18174 DebugLoc DL = MI->getDebugLoc();
18176 // To "insert" a SELECT_CC instruction, we actually have to insert the
18177 // diamond control-flow pattern. The incoming instruction knows the
18178 // destination vreg to set, the condition code register to branch on, the
18179 // true/false values to select between, and a branch opcode to use.
18180 const BasicBlock *LLVM_BB = BB->getBasicBlock();
18181 MachineFunction::iterator It = BB;
18182 ++It;
18184 //  thisMBB:
18185 //  ...
18186 //   TrueVal = ...
18187 //   cmpTY ccX, r1, r2
18188 //   bCC copy1MBB
18189 //   fallthrough --> copy0MBB
18190 MachineBasicBlock *thisMBB = BB;
18191 MachineFunction *F = BB->getParent();
18192 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
18193 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
18194 F->insert(It, copy0MBB);
18195 F->insert(It, sinkMBB);
18197 // If the EFLAGS register isn't dead in the terminator, then claim that it's
18198 // live into the sink and copy blocks.
18199 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18200 if (!MI->killsRegister(X86::EFLAGS) &&
18201 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
18202 copy0MBB->addLiveIn(X86::EFLAGS);
18203 sinkMBB->addLiveIn(X86::EFLAGS);
18206 // Transfer the remainder of BB and its successor edges to sinkMBB.
18207 sinkMBB->splice(sinkMBB->begin(), BB,
18208 std::next(MachineBasicBlock::iterator(MI)), BB->end());
18209 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
18211 // Add the true and fallthrough blocks as its successors.
18212 BB->addSuccessor(copy0MBB);
18213 BB->addSuccessor(sinkMBB);
18215 // Create the conditional branch instruction.
18216 unsigned Opc =
18217 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
18218 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
18220 //  copy0MBB:
18221 //   %FalseValue = ...
18222 // # fallthrough to sinkMBB
18223 copy0MBB->addSuccessor(sinkMBB);
18226 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
18228 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
18229 TII->get(X86::PHI), MI->getOperand(0).getReg())
18230 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
18231 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
18233 MI->eraseFromParent(); // The pseudo instruction is gone now.
18237 MachineBasicBlock *
18238 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
18239 MachineBasicBlock *BB) const {
18240 MachineFunction *MF = BB->getParent();
18241 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18242 DebugLoc DL = MI->getDebugLoc();
18243 const BasicBlock *LLVM_BB = BB->getBasicBlock();
18245 assert(MF->shouldSplitStack());
18247 const bool Is64Bit = Subtarget->is64Bit();
18248 const bool IsLP64 = Subtarget->isTarget64BitLP64();
18250 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
18251 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
18253 // BB:
18254 //  ... [Till the alloca]
18255 // If stacklet is not large enough, jump to mallocMBB
18256 //
18257 // bumpMBB:
18258 //  Allocate by subtracting from RSP
18259 //  Jump to continueMBB
18260 //
18261 // mallocMBB:
18262 //  Allocate by call to runtime
18263 //
18264 // continueMBB:
18265 //  ...
18266 //  [rest of original BB]
18269 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
18270 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
18271 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
18273 MachineRegisterInfo &MRI = MF->getRegInfo();
18274 const TargetRegisterClass *AddrRegClass =
18275 getRegClassFor(getPointerTy());
18277 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
18278 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
18279 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
18280 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
18281 sizeVReg = MI->getOperand(1).getReg(),
18282 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
18284 MachineFunction::iterator MBBIter = BB;
18285 ++MBBIter;
18287 MF->insert(MBBIter, bumpMBB);
18288 MF->insert(MBBIter, mallocMBB);
18289 MF->insert(MBBIter, continueMBB);
18291 continueMBB->splice(continueMBB->begin(), BB,
18292 std::next(MachineBasicBlock::iterator(MI)), BB->end());
18293 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
18295 // Add code to the main basic block to check if the stack limit has been hit,
18296 // and if so, jump to mallocMBB otherwise to bumpMBB.
18297 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
18298 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
18299 .addReg(tmpSPVReg).addReg(sizeVReg);
18300 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
18301 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
18302 .addReg(SPLimitVReg);
18303 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
18305 // bumpMBB simply decreases the stack pointer, since we know the current
18306 // stacklet has enough space.
18307 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
18308 .addReg(SPLimitVReg);
18309 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
18310 .addReg(SPLimitVReg);
18311 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
18313 // Calls into a routine in libgcc to allocate more space from the heap.
18314 const uint32_t *RegMask =
18315 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
18316 if (IsLP64) {
18317 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
18318 .addReg(sizeVReg);
18319 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
18320 .addExternalSymbol("__morestack_allocate_stack_space")
18321 .addRegMask(RegMask)
18322 .addReg(X86::RDI, RegState::Implicit)
18323 .addReg(X86::RAX, RegState::ImplicitDefine);
18324 } else if (Is64Bit) {
18325 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
18326 .addReg(sizeVReg);
18327 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
18328 .addExternalSymbol("__morestack_allocate_stack_space")
18329 .addRegMask(RegMask)
18330 .addReg(X86::EDI, RegState::Implicit)
18331 .addReg(X86::EAX, RegState::ImplicitDefine);
18332 } else {
18333 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
18335 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
18336 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
18337 .addExternalSymbol("__morestack_allocate_stack_space")
18338 .addRegMask(RegMask)
18339 .addReg(X86::EAX, RegState::ImplicitDefine);
18343 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
18346 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
18347 .addReg(IsLP64 ? X86::RAX : X86::EAX);
18348 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
18350 // Set up the CFG correctly.
18351 BB->addSuccessor(bumpMBB);
18352 BB->addSuccessor(mallocMBB);
18353 mallocMBB->addSuccessor(continueMBB);
18354 bumpMBB->addSuccessor(continueMBB);
18356 // Take care of the PHI nodes.
18357 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
18358 MI->getOperand(0).getReg())
18359 .addReg(mallocPtrVReg).addMBB(mallocMBB)
18360 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
18362 // Delete the original pseudo instruction.
18363 MI->eraseFromParent();
18366 return continueMBB;
18369 MachineBasicBlock *
18370 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
18371 MachineBasicBlock *BB) const {
18372 DebugLoc DL = MI->getDebugLoc();
18374 assert(!Subtarget->isTargetMachO());
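  // The probing logic lives in X86FrameLowering: it expands WIN_ALLOCA into a
  // call to the target's stack-probe routine (e.g. __chkstk) so that large
  // dynamic allocations touch each page in order.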
18376 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
18378 MI->eraseFromParent(); // The pseudo instruction is gone now.
18382 MachineBasicBlock *
18383 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
18384 MachineBasicBlock *BB) const {
18385 // This is pretty easy. We're taking the value that we received from
18386 // our load from the relocation, sticking it in either RDI (x86-64)
18387 // or EAX and doing an indirect call. The return value will then
18388 // be in the normal return register.
18389 MachineFunction *F = BB->getParent();
18390 const X86InstrInfo *TII = Subtarget->getInstrInfo();
18391 DebugLoc DL = MI->getDebugLoc();
18393 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
18394 assert(MI->getOperand(3).isGlobal() && "This should be a global");
18396 // Get a register mask for the lowered call.
18397 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
18398 // proper register mask.
18399 const uint32_t *RegMask =
18400 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
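  // Three cases follow: 64-bit loads the TLV descriptor's address into RDI
  // and calls through its first slot; 32-bit non-PIC does the same through
  // EAX with an absolute address; 32-bit PIC additionally forms the address
  // relative to the global base register.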
18401 if (Subtarget->is64Bit()) {
18402 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
18403 TII->get(X86::MOV64rm), X86::RDI)
18405 .addImm(0).addReg(0)
18406 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
18407 MI->getOperand(3).getTargetFlags())
18409 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
18410 addDirectMem(MIB, X86::RDI);
18411 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
18412 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
18413 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
18414 TII->get(X86::MOV32rm), X86::EAX)
18416 .addImm(0).addReg(0)
18417 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
18418 MI->getOperand(3).getTargetFlags())
18420 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
18421 addDirectMem(MIB, X86::EAX);
18422 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
18424 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
18425 TII->get(X86::MOV32rm), X86::EAX)
18426 .addReg(TII->getGlobalBaseReg(F))
18427 .addImm(0).addReg(0)
18428 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
18429 MI->getOperand(3).getTargetFlags())
18431 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
18432 addDirectMem(MIB, X86::EAX);
18433 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
18436 MI->eraseFromParent(); // The pseudo instruction is gone now.
18440 MachineBasicBlock *
18441 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
18442 MachineBasicBlock *MBB) const {
18443 DebugLoc DL = MI->getDebugLoc();
18444 MachineFunction *MF = MBB->getParent();
18445 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18446 MachineRegisterInfo &MRI = MF->getRegInfo();
18448 const BasicBlock *BB = MBB->getBasicBlock();
18449 MachineFunction::iterator I = MBB;
18452 // Memory Reference
18453 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
18454 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
18457 unsigned MemOpndSlot = 0;
18459 unsigned CurOp = 0;
18461 DstReg = MI->getOperand(CurOp++).getReg();
18462 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
18463 assert(RC->hasType(MVT::i32) && "Invalid destination!");
18464 unsigned mainDstReg = MRI.createVirtualRegister(RC);
18465 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
18467 MemOpndSlot = CurOp;
18469 MVT PVT = getPointerTy();
18470 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
18471 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
18489 MachineBasicBlock *thisMBB = MBB;
18490 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
18491 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
18492 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
18493 MF->insert(I, mainMBB);
18494 MF->insert(I, sinkMBB);
18495 MF->push_back(restoreMBB);
18497 MachineInstrBuilder MIB;
18499 // Transfer the remainder of BB and its successor edges to sinkMBB.
18500 sinkMBB->splice(sinkMBB->begin(), MBB,
18501 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
18502 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
18505 unsigned PtrStoreOpc = 0;
18506 unsigned LabelReg = 0;
18507 const int64_t LabelOffset = 1 * PVT.getStoreSize();
18508 Reloc::Model RM = MF->getTarget().getRelocationModel();
18509 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
18510 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
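  // With a small code model and no PIC, the address of restoreMBB fits in a
  // 32-bit immediate and can be stored into the jmp_buf directly; otherwise
  // it is first materialized into a register with LEA.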
18512 // Prepare IP either in reg or imm.
18513 if (!UseImmLabel) {
18514 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
18515 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
18516 LabelReg = MRI.createVirtualRegister(PtrRC);
18517 if (Subtarget->is64Bit()) {
18518 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
18522 .addMBB(restoreMBB)
18525 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
18526 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
18527 .addReg(XII->getGlobalBaseReg(MF))
18530 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
18534 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
18536 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
18537 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
18538 if (i == X86::AddrDisp)
18539 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
18541 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
18544 MIB.addReg(LabelReg);
18546 MIB.addMBB(restoreMBB);
18547 MIB.setMemRefs(MMOBegin, MMOEnd);
18549 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
18550 .addMBB(restoreMBB);
18552 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18553 MIB.addRegMask(RegInfo->getNoPreservedMask());
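  // Control may re-enter at restoreMBB via a longjmp with arbitrary register
  // state, so conservatively treat every register as clobbered across the
  // setup instruction.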
18554 thisMBB->addSuccessor(mainMBB);
18555 thisMBB->addSuccessor(restoreMBB);
18559 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
18560 mainMBB->addSuccessor(sinkMBB);
18563 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
18564 TII->get(X86::PHI), DstReg)
18565 .addReg(mainDstReg).addMBB(mainMBB)
18566 .addReg(restoreDstReg).addMBB(restoreMBB);
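  // If this function uses a base pointer (e.g. it realigns the stack and has
  // variable-sized objects), the longjmp path must reload it: longjmp only
  // restores FP and SP, so the base pointer is re-derived from the slot that
  // setRestoreBasePointer reserves next to the frame pointer.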
18569 if (RegInfo->hasBasePointer(*MF)) {
18570 const bool Uses64BitFramePtr =
18571 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
18572 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
18573 X86FI->setRestoreBasePointer(MF);
18574 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
18575 unsigned BasePtr = RegInfo->getBaseRegister();
18576 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
18577 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
18578 FramePtr, true, X86FI->getRestoreBasePointerOffset())
18579 .setMIFlag(MachineInstr::FrameSetup);
18581 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
18582 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
18583 restoreMBB->addSuccessor(sinkMBB);
18585 MI->eraseFromParent();
18589 MachineBasicBlock *
18590 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
18591 MachineBasicBlock *MBB) const {
18592 DebugLoc DL = MI->getDebugLoc();
18593 MachineFunction *MF = MBB->getParent();
18594 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18595 MachineRegisterInfo &MRI = MF->getRegInfo();
18597 // Memory Reference
18598 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
18599 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
18601 MVT PVT = getPointerTy();
18602 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
18603 "Invalid Pointer Size!");
18605 const TargetRegisterClass *RC =
18606 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
18607 unsigned Tmp = MRI.createVirtualRegister(RC);
18608 // Since FP is only updated here but NOT referenced, it's treated as GPR.
18609 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18610 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
18611 unsigned SP = RegInfo->getStackRegister();
18613 MachineInstrBuilder MIB;
18615 const int64_t LabelOffset = 1 * PVT.getStoreSize();
18616 const int64_t SPOffset = 2 * PVT.getStoreSize();
18618 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
18619 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
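  // jmp_buf layout assumed here: slot 0 holds the frame pointer, slot 1 the
  // resume label, and slot 2 the stack pointer. Reload FP, the target label,
  // and SP, then jump to the saved label.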
18622 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
18623 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
18624 MIB.addOperand(MI->getOperand(i));
18625 MIB.setMemRefs(MMOBegin, MMOEnd);
18627 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
18628 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
18629 if (i == X86::AddrDisp)
18630 MIB.addDisp(MI->getOperand(i), LabelOffset);
18632 MIB.addOperand(MI->getOperand(i));
18634 MIB.setMemRefs(MMOBegin, MMOEnd);
18636 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
18637 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
18638 if (i == X86::AddrDisp)
18639 MIB.addDisp(MI->getOperand(i), SPOffset);
18641 MIB.addOperand(MI->getOperand(i));
18643 MIB.setMemRefs(MMOBegin, MMOEnd);
18645 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
18647 MI->eraseFromParent();
18651 // Replace 213-type (isel default) FMA3 instructions with 231-type for
18652 // accumulator loops. Writing back to the accumulator allows the coalescer
18653 // to remove extra copies in the loop.
18654 MachineBasicBlock *
18655 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
18656 MachineBasicBlock *MBB) const {
18657 MachineOperand &AddendOp = MI->getOperand(3);
18659 // Bail out early if the addend isn't a register - we can't switch these.
18660 if (!AddendOp.isReg())
18663 MachineFunction &MF = *MBB->getParent();
18664 MachineRegisterInfo &MRI = MF.getRegInfo();
18666 // Check whether the addend is defined by a PHI:
18667 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
18668 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
18669 if (!AddendDef.isPHI())
18672 // Look for the following pattern:
18674 // %addend = phi [%entry, 0], [%loop, %result]
18676 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
18680 // %addend = phi [%entry, 0], [%loop, %result]
18682 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
18684 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
18685 assert(AddendDef.getOperand(i).isReg());
18686 MachineOperand PHISrcOp = AddendDef.getOperand(i);
18687 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
18688 if (&PHISrcInst == MI) {
18689 // Found a matching instruction.
18690 unsigned NewFMAOpc = 0;
18691 switch (MI->getOpcode()) {
18692 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
18693 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
18694 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
18695 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
18696 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
18697 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
18698 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
18699 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
18700 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
18701 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
18702 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
18703 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
18704 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
18705 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
18706 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
18707 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
18708 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
18709 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
18710 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
18711 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
18713 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
18714 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
18715 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
18716 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
18717 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
18718 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
18719 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
18720 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
18721 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
18722 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
18723 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
18724 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
18725 default: llvm_unreachable("Unrecognized FMA variant.");
18728 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
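      // Rebuild the FMA in 231 form: destination, then the addend (operand 3)
      // as the tied first source so it can be coalesced with the accumulator,
      // followed by the remaining multiplicands in the order the 231 encoding
      // expects.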
18729 MachineInstrBuilder MIB =
18730 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
18731 .addOperand(MI->getOperand(0))
18732 .addOperand(MI->getOperand(3))
18733 .addOperand(MI->getOperand(2))
18734 .addOperand(MI->getOperand(1));
18735 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
18736 MI->eraseFromParent();
18743 MachineBasicBlock *
18744 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
18745 MachineBasicBlock *BB) const {
18746 switch (MI->getOpcode()) {
18747 default: llvm_unreachable("Unexpected instr type to insert");
18748 case X86::TAILJMPd64:
18749 case X86::TAILJMPr64:
18750 case X86::TAILJMPm64:
18751 case X86::TAILJMPd64_REX:
18752 case X86::TAILJMPr64_REX:
18753 case X86::TAILJMPm64_REX:
18754 llvm_unreachable("TAILJMP64 would not be touched here.");
18755 case X86::TCRETURNdi64:
18756 case X86::TCRETURNri64:
18757 case X86::TCRETURNmi64:
18759 case X86::WIN_ALLOCA:
18760 return EmitLoweredWinAlloca(MI, BB);
18761 case X86::SEG_ALLOCA_32:
18762 case X86::SEG_ALLOCA_64:
18763 return EmitLoweredSegAlloca(MI, BB);
18764 case X86::TLSCall_32:
18765 case X86::TLSCall_64:
18766 return EmitLoweredTLSCall(MI, BB);
18767 case X86::CMOV_GR8:
18768 case X86::CMOV_FR32:
18769 case X86::CMOV_FR64:
18770 case X86::CMOV_V4F32:
18771 case X86::CMOV_V2F64:
18772 case X86::CMOV_V2I64:
18773 case X86::CMOV_V8F32:
18774 case X86::CMOV_V4F64:
18775 case X86::CMOV_V4I64:
18776 case X86::CMOV_V16F32:
18777 case X86::CMOV_V8F64:
18778 case X86::CMOV_V8I64:
18779 case X86::CMOV_GR16:
18780 case X86::CMOV_GR32:
18781 case X86::CMOV_RFP32:
18782 case X86::CMOV_RFP64:
18783 case X86::CMOV_RFP80:
18784 return EmitLoweredSelect(MI, BB);
18786 case X86::FP32_TO_INT16_IN_MEM:
18787 case X86::FP32_TO_INT32_IN_MEM:
18788 case X86::FP32_TO_INT64_IN_MEM:
18789 case X86::FP64_TO_INT16_IN_MEM:
18790 case X86::FP64_TO_INT32_IN_MEM:
18791 case X86::FP64_TO_INT64_IN_MEM:
18792 case X86::FP80_TO_INT16_IN_MEM:
18793 case X86::FP80_TO_INT32_IN_MEM:
18794 case X86::FP80_TO_INT64_IN_MEM: {
18795 MachineFunction *F = BB->getParent();
18796 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18797 DebugLoc DL = MI->getDebugLoc();
18799 // Change the floating point control register to use "round towards zero"
18800 // mode when truncating to an integer value.
18801 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
18802 addFrameReference(BuildMI(*BB, MI, DL,
18803 TII->get(X86::FNSTCW16m)), CWFrameIdx);
18805 // Load the old value of the high byte of the control word...
18807 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
18808 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
18811 // Set the high part to be round to zero...
18812 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
18815 // Reload the modified control word now...
18816 addFrameReference(BuildMI(*BB, MI, DL,
18817 TII->get(X86::FLDCW16m)), CWFrameIdx);
18819 // Restore the memory image of control word to original value
18820 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
18823 // Get the X86 opcode to use.
18825 switch (MI->getOpcode()) {
18826 default: llvm_unreachable("illegal opcode!");
18827 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
18828 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
18829 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
18830 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
18831 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
18832 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
18833 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
18834 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
18835 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
18839 MachineOperand &Op = MI->getOperand(0);
18841 AM.BaseType = X86AddressMode::RegBase;
18842 AM.Base.Reg = Op.getReg();
18844 AM.BaseType = X86AddressMode::FrameIndexBase;
18845 AM.Base.FrameIndex = Op.getIndex();
18847 Op = MI->getOperand(1);
18849 AM.Scale = Op.getImm();
18850 Op = MI->getOperand(2);
18852 AM.IndexReg = Op.getImm();
18853 Op = MI->getOperand(3);
18854 if (Op.isGlobal()) {
18855 AM.GV = Op.getGlobal();
18857 AM.Disp = Op.getImm();
18859 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
18860 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
18862 // Reload the original control word now.
18863 addFrameReference(BuildMI(*BB, MI, DL,
18864 TII->get(X86::FLDCW16m)), CWFrameIdx);
18866 MI->eraseFromParent(); // The pseudo instruction is gone now.
18869 // String/text processing lowering.
18870 case X86::PCMPISTRM128REG:
18871 case X86::VPCMPISTRM128REG:
18872 case X86::PCMPISTRM128MEM:
18873 case X86::VPCMPISTRM128MEM:
18874 case X86::PCMPESTRM128REG:
18875 case X86::VPCMPESTRM128REG:
18876 case X86::PCMPESTRM128MEM:
18877 case X86::VPCMPESTRM128MEM:
18878 assert(Subtarget->hasSSE42() &&
18879 "Target must have SSE4.2 or AVX features enabled");
18880 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
18882 // String/text processing lowering.
18883 case X86::PCMPISTRIREG:
18884 case X86::VPCMPISTRIREG:
18885 case X86::PCMPISTRIMEM:
18886 case X86::VPCMPISTRIMEM:
18887 case X86::PCMPESTRIREG:
18888 case X86::VPCMPESTRIREG:
18889 case X86::PCMPESTRIMEM:
18890 case X86::VPCMPESTRIMEM:
18891 assert(Subtarget->hasSSE42() &&
18892 "Target must have SSE4.2 or AVX features enabled");
18893 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
18895 // Thread synchronization.
18897 return EmitMonitor(MI, BB, Subtarget);
18901 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
18903 case X86::VASTART_SAVE_XMM_REGS:
18904 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
18906 case X86::VAARG_64:
18907 return EmitVAARG64WithCustomInserter(MI, BB);
18909 case X86::EH_SjLj_SetJmp32:
18910 case X86::EH_SjLj_SetJmp64:
18911 return emitEHSjLjSetJmp(MI, BB);
18913 case X86::EH_SjLj_LongJmp32:
18914 case X86::EH_SjLj_LongJmp64:
18915 return emitEHSjLjLongJmp(MI, BB);
18917 case TargetOpcode::STATEPOINT:
18918 // As an implementation detail, STATEPOINT shares the STACKMAP format at
18919 // this point in the process. We diverge later.
18920 return emitPatchPoint(MI, BB);
18922 case TargetOpcode::STACKMAP:
18923 case TargetOpcode::PATCHPOINT:
18924 return emitPatchPoint(MI, BB);
18926 case X86::VFMADDPDr213r:
18927 case X86::VFMADDPSr213r:
18928 case X86::VFMADDSDr213r:
18929 case X86::VFMADDSSr213r:
18930 case X86::VFMSUBPDr213r:
18931 case X86::VFMSUBPSr213r:
18932 case X86::VFMSUBSDr213r:
18933 case X86::VFMSUBSSr213r:
18934 case X86::VFNMADDPDr213r:
18935 case X86::VFNMADDPSr213r:
18936 case X86::VFNMADDSDr213r:
18937 case X86::VFNMADDSSr213r:
18938 case X86::VFNMSUBPDr213r:
18939 case X86::VFNMSUBPSr213r:
18940 case X86::VFNMSUBSDr213r:
18941 case X86::VFNMSUBSSr213r:
18942 case X86::VFMADDSUBPDr213r:
18943 case X86::VFMADDSUBPSr213r:
18944 case X86::VFMSUBADDPDr213r:
18945 case X86::VFMSUBADDPSr213r:
18946 case X86::VFMADDPDr213rY:
18947 case X86::VFMADDPSr213rY:
18948 case X86::VFMSUBPDr213rY:
18949 case X86::VFMSUBPSr213rY:
18950 case X86::VFNMADDPDr213rY:
18951 case X86::VFNMADDPSr213rY:
18952 case X86::VFNMSUBPDr213rY:
18953 case X86::VFNMSUBPSr213rY:
18954 case X86::VFMADDSUBPDr213rY:
18955 case X86::VFMADDSUBPSr213rY:
18956 case X86::VFMSUBADDPDr213rY:
18957 case X86::VFMSUBADDPSr213rY:
18958 return emitFMA3Instr(MI, BB);
18962 //===----------------------------------------------------------------------===//
18963 // X86 Optimization Hooks
18964 //===----------------------------------------------------------------------===//
18966 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
18969 const SelectionDAG &DAG,
18970 unsigned Depth) const {
18971 unsigned BitWidth = KnownZero.getBitWidth();
18972 unsigned Opc = Op.getOpcode();
18973 assert((Opc >= ISD::BUILTIN_OP_END ||
18974 Opc == ISD::INTRINSIC_WO_CHAIN ||
18975 Opc == ISD::INTRINSIC_W_CHAIN ||
18976 Opc == ISD::INTRINSIC_VOID) &&
18977 "Should use MaskedValueIsZero if you don't know whether Op"
18978 " is a target node!");
18980 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
18994 // These nodes' second result is a boolean.
18995 if (Op.getResNo() == 0)
18998 case X86ISD::SETCC:
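    // X86ISD::SETCC materializes 0 or 1 in an i8, so every bit above the
    // lowest is known to be zero.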
18999 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
19001 case ISD::INTRINSIC_WO_CHAIN: {
19002 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
19003 unsigned NumLoBits = 0;
19006 case Intrinsic::x86_sse_movmsk_ps:
19007 case Intrinsic::x86_avx_movmsk_ps_256:
19008 case Intrinsic::x86_sse2_movmsk_pd:
19009 case Intrinsic::x86_avx_movmsk_pd_256:
19010 case Intrinsic::x86_mmx_pmovmskb:
19011 case Intrinsic::x86_sse2_pmovmskb_128:
19012 case Intrinsic::x86_avx2_pmovmskb: {
19013 // High bits of movmskp{s|d}, pmovmskb are known zero.
19015 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
19016 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
19017 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
19018 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
19019 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
19020 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
19021 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
19022 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
19024 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
19033 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
19035 const SelectionDAG &,
19036 unsigned Depth) const {
19037 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
19038 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
19039 return Op.getValueType().getScalarType().getSizeInBits();
19045 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
19046 /// node is a GlobalAddress + offset.
19047 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
19048 const GlobalValue* &GA,
19049 int64_t &Offset) const {
19050 if (N->getOpcode() == X86ISD::Wrapper) {
19051 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
19052 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
19053 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
19057 return TargetLowering::isGAPlusOffset(N, GA, Offset);
19060 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
/// same as extracting the high 128-bit part of a 256-bit vector and then
19062 /// inserting the result into the low part of a new 256-bit vector
19063 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
19064 EVT VT = SVOp->getValueType(0);
19065 unsigned NumElems = VT.getVectorNumElements();
19067 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
19068 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
19069 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
19070 SVOp->getMaskElt(j) >= 0)
19076 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
/// same as extracting the low 128-bit part of a 256-bit vector and then
19078 /// inserting the result into the high part of a new 256-bit vector
19079 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
19080 EVT VT = SVOp->getValueType(0);
19081 unsigned NumElems = VT.getVectorNumElements();
19083 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
19084 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
19085 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
19086 SVOp->getMaskElt(j) >= 0)
19092 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
19093 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
19094 TargetLowering::DAGCombinerInfo &DCI,
19095 const X86Subtarget* Subtarget) {
19097 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
19098 SDValue V1 = SVOp->getOperand(0);
19099 SDValue V2 = SVOp->getOperand(1);
19100 EVT VT = SVOp->getValueType(0);
19101 unsigned NumElems = VT.getVectorNumElements();
19103 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
19104 V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //  shuffle(concat_vectors(V, undef),
    //          concat_vectors(build_vector(0,...), undef))
    //  --> V zero-extended into the wider result vector.
19115 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
19116 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
19117 V1.getOperand(1).getOpcode() != ISD::UNDEF)
19120 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
    // To match the shuffle mask, the first half of the mask should select the
    // first vector's elements in order, and the second half should splat the
    // first element of the second vector.
19126 for (unsigned i = 0; i != NumElems/2; ++i)
19127 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
19128 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
19131 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
19132 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
19133 if (Ld->hasNUsesOfValue(1, 0)) {
19134 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
19135 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
19137 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
19139 Ld->getPointerInfo(),
19140 Ld->getAlignment(),
19141 false/*isVolatile*/, true/*ReadMem*/,
19142 false/*WriteMem*/);
19144 // Make sure the newly-created LOAD is in the same position as Ld in
19145 // terms of dependency. We create a TokenFactor for Ld and ResNode,
19146 // and update uses of Ld's output chain to use the TokenFactor.
19147 if (Ld->hasAnyUseOfValue(1)) {
19148 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
19149 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
19150 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
19151 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
19152 SDValue(ResNode.getNode(), 1));
19155 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
    // Emit a zeroed vector and insert the desired subvector on its first half.
19161 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
19162 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
19163 return DCI.CombineTo(N, InsV);
19166 //===--------------------------------------------------------------------===//
19167 // Combine some shuffles into subvector extracts and inserts:
19170 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
19171 if (isShuffleHigh128VectorInsertLow(SVOp)) {
19172 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
19173 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
19174 return DCI.CombineTo(N, InsV);
19177 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
19178 if (isShuffleLow128VectorInsertHigh(SVOp)) {
19179 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
19180 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
19181 return DCI.CombineTo(N, InsV);
19187 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// This is the leaf of the recursive combine below. When we have found some
19191 /// chain of single-use x86 shuffle instructions and accumulated the combined
19192 /// shuffle mask represented by them, this will try to pattern match that mask
19193 /// into either a single instruction if there is a special purpose instruction
19194 /// for this operation, or into a PSHUFB instruction which is a fully general
19195 /// instruction but should only be used to replace chains over a certain depth.
19196 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
19197 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
19198 TargetLowering::DAGCombinerInfo &DCI,
19199 const X86Subtarget *Subtarget) {
19200 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
19202 // Find the operand that enters the chain. Note that multiple uses are OK
19203 // here, we're not going to remove the operand we find.
19204 SDValue Input = Op.getOperand(0);
19205 while (Input.getOpcode() == ISD::BITCAST)
19206 Input = Input.getOperand(0);
19208 MVT VT = Input.getSimpleValueType();
19209 MVT RootVT = Root.getSimpleValueType();
19212 // Just remove no-op shuffle masks.
19213 if (Mask.size() == 1) {
19214 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
19219 // Use the float domain if the operand type is a floating point type.
19220 bool FloatDomain = VT.isFloatingPoint();
19222 // For floating point shuffles, we don't have free copies in the shuffle
19223 // instructions or the ability to load as part of the instruction, so
19224 // canonicalize their shuffles to UNPCK or MOV variants.
19226 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
19227 // vectors because it can have a load folded into it that UNPCK cannot. This
19228 // doesn't preclude something switching to the shorter encoding post-RA.
19230 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
19231 bool Lo = Mask.equals(0, 0);
19234 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
19235 // is no slower than UNPCKLPD but has the option to fold the input operand
19236 // into even an unaligned memory load.
19237 if (Lo && Subtarget->hasSSE3()) {
19238 Shuffle = X86ISD::MOVDDUP;
19239 ShuffleVT = MVT::v2f64;
19241 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
19242 // than the UNPCK variants.
19243 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
19244 ShuffleVT = MVT::v4f32;
19246 if (Depth == 1 && Root->getOpcode() == Shuffle)
19247 return false; // Nothing to do!
19248 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
19249 DCI.AddToWorklist(Op.getNode());
19250 if (Shuffle == X86ISD::MOVDDUP)
19251 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
19253 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
19254 DCI.AddToWorklist(Op.getNode());
19255 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
19259 if (Subtarget->hasSSE3() &&
19260 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
19261 bool Lo = Mask.equals(0, 0, 2, 2);
19262 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
19263 MVT ShuffleVT = MVT::v4f32;
19264 if (Depth == 1 && Root->getOpcode() == Shuffle)
19265 return false; // Nothing to do!
19266 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
19267 DCI.AddToWorklist(Op.getNode());
19268 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
19269 DCI.AddToWorklist(Op.getNode());
19270 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
19274 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
19275 bool Lo = Mask.equals(0, 0, 1, 1);
19276 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
19277 MVT ShuffleVT = MVT::v4f32;
19278 if (Depth == 1 && Root->getOpcode() == Shuffle)
19279 return false; // Nothing to do!
19280 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
19281 DCI.AddToWorklist(Op.getNode());
19282 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
19283 DCI.AddToWorklist(Op.getNode());
19284 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
19290 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
19291 // variants as none of these have single-instruction variants that are
19292 // superior to the UNPCK formulation.
19293 if (!FloatDomain &&
19294 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
19295 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
19296 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
19297 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
19299 bool Lo = Mask[0] == 0;
19300 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
19301 if (Depth == 1 && Root->getOpcode() == Shuffle)
19302 return false; // Nothing to do!
19304 switch (Mask.size()) {
19306 ShuffleVT = MVT::v8i16;
19309 ShuffleVT = MVT::v16i8;
19312 llvm_unreachable("Impossible mask size!");
19314 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
19315 DCI.AddToWorklist(Op.getNode());
19316 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
19317 DCI.AddToWorklist(Op.getNode());
19318 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
19323 // Don't try to re-form single instruction chains under any circumstances now
19324 // that we've done encoding canonicalization for them.
19328 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
19329 // can replace them with a single PSHUFB instruction profitably. Intel's
  // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
19331 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
19332 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
19333 SmallVector<SDValue, 16> PSHUFBMask;
19334 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
19335 int Ratio = 16 / Mask.size();
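    // Expand the accumulated mask (which may have fewer than 16 elements) to a
    // 16-byte PSHUFB control: each mask element covers Ratio consecutive
    // bytes. Undef lanes stay undef; zero lanes get a control byte with its
    // high bit set, which makes PSHUFB write zero for that byte.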
19336 for (unsigned i = 0; i < 16; ++i) {
19337 if (Mask[i / Ratio] == SM_SentinelUndef) {
19338 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
19341 int M = Mask[i / Ratio] != SM_SentinelZero
19342 ? Ratio * Mask[i / Ratio] + i % Ratio
19344 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
19346 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
19347 DCI.AddToWorklist(Op.getNode());
19348 SDValue PSHUFBMaskOp =
19349 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
19350 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
19351 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
19352 DCI.AddToWorklist(Op.getNode());
19353 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
19358 // Failed to find any combines.
19362 /// \brief Fully generic combining of x86 shuffle instructions.
19364 /// This should be the last combine run over the x86 shuffle instructions. Once
19365 /// they have been fully optimized, this will recursively consider all chains
19366 /// of single-use shuffle instructions, build a generic model of the cumulative
19367 /// shuffle operation, and check for simpler instructions which implement this
19368 /// operation. We use this primarily for two purposes:
19370 /// 1) Collapse generic shuffles to specialized single instructions when
19371 /// equivalent. In most cases, this is just an encoding size win, but
19372 /// sometimes we will collapse multiple generic shuffles into a single
19373 /// special-purpose shuffle.
19374 /// 2) Look for sequences of shuffle instructions with 3 or more total
19375 /// instructions, and replace them with the slightly more expensive SSSE3
19376 /// PSHUFB instruction if available. We do this as the last combining step
19377 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
19379 /// use a register or have to read from memory and so is slightly (but only
19380 /// slightly) more expensive than the other shuffle instructions.
19382 /// Because this is inherently a quadratic operation (for each shuffle in
19383 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
19384 /// This should never be an issue in practice as the shuffle lowering doesn't
19385 /// produce sequences of more than 8 instructions.
19387 /// FIXME: We will currently miss some cases where the redundant shuffling
19388 /// would simplify under the threshold for PSHUFB formation because of
19389 /// combine-ordering. To fix this, we should do the redundant instruction
19390 /// combining in this recursive walk.
19391 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
19392 ArrayRef<int> RootMask,
19393 int Depth, bool HasPSHUFB,
19395 TargetLowering::DAGCombinerInfo &DCI,
19396 const X86Subtarget *Subtarget) {
19397 // Bound the depth of our recursive combine because this is ultimately
19398 // quadratic in nature.
19402 // Directly rip through bitcasts to find the underlying operand.
19403 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
19404 Op = Op.getOperand(0);
19406 MVT VT = Op.getSimpleValueType();
19407 if (!VT.isVector())
19408 return false; // Bail if we hit a non-vector.
19409 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
19410 // version should be added.
19411 if (VT.getSizeInBits() != 128)
19414 assert(Root.getSimpleValueType().isVector() &&
19415 "Shuffles operate on vector types!");
19416 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
19417 "Can only combine shuffles of the same vector register size.");
19419 if (!isTargetShuffle(Op.getOpcode()))
19421 SmallVector<int, 16> OpMask;
19423 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
  // We can only combine unary shuffles for which we can decode the mask.
19425 if (!HaveMask || !IsUnary)
19428 assert(VT.getVectorNumElements() == OpMask.size() &&
19429 "Different mask size from vector size!");
19430 assert(((RootMask.size() > OpMask.size() &&
19431 RootMask.size() % OpMask.size() == 0) ||
19432 (OpMask.size() > RootMask.size() &&
19433 OpMask.size() % RootMask.size() == 0) ||
19434 OpMask.size() == RootMask.size()) &&
19435 "The smaller number of elements must divide the larger.");
19436 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
19437 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
19438 assert(((RootRatio == 1 && OpRatio == 1) ||
19439 (RootRatio == 1) != (OpRatio == 1)) &&
19440 "Must not have a ratio for both incoming and op masks!");
19442 SmallVector<int, 16> Mask;
19443 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
19445 // Merge this shuffle operation's mask into our accumulated mask. Note that
19446 // this shuffle's mask will be the first applied to the input, followed by the
19447 // root mask to get us all the way to the root value arrangement. The reason
19448 // for this order is that we are recursing up the operation chain.
19449 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
19450 int RootIdx = i / RootRatio;
19451 if (RootMask[RootIdx] < 0) {
19452 // This is a zero or undef lane, we're done.
19453 Mask.push_back(RootMask[RootIdx]);
19457 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
19458 int OpIdx = RootMaskedIdx / OpRatio;
19459 if (OpMask[OpIdx] < 0) {
19460 // The incoming lanes are zero or undef, it doesn't matter which ones we
19462 Mask.push_back(OpMask[OpIdx]);
19466 // Ok, we have non-zero lanes, map them through.
19467 Mask.push_back(OpMask[OpIdx] * OpRatio +
19468 RootMaskedIdx % OpRatio);
19471 // See if we can recurse into the operand to combine more things.
19472 switch (Op.getOpcode()) {
19473 case X86ISD::PSHUFB:
19475 case X86ISD::PSHUFD:
19476 case X86ISD::PSHUFHW:
19477 case X86ISD::PSHUFLW:
19478 if (Op.getOperand(0).hasOneUse() &&
19479 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
19480 HasPSHUFB, DAG, DCI, Subtarget))
19484 case X86ISD::UNPCKL:
19485 case X86ISD::UNPCKH:
19486 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
19487 // We can't check for single use, we have to check that this shuffle is the only user.
19488 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
19489 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
19490 HasPSHUFB, DAG, DCI, Subtarget))
19495 // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
19497 // elements, and shrink them to the half-width mask. It does this in a loop
19498 // so it will reduce the size of the mask to the minimal width mask which
19499 // performs an equivalent shuffle.
19500 SmallVector<int, 16> WidenedMask;
19501 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
19502 Mask = std::move(WidenedMask);
19503 WidenedMask.clear();
19506 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
19510 /// \brief Get the PSHUF-style mask from PSHUF node.
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
19513 /// PSHUF-style masks that can be reused with such instructions.
19514 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
19515 SmallVector<int, 4> Mask;
19517 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
19521 switch (N.getOpcode()) {
19522 case X86ISD::PSHUFD:
19524 case X86ISD::PSHUFLW:
19527 case X86ISD::PSHUFHW:
19528 Mask.erase(Mask.begin(), Mask.begin() + 4);
19529 for (int &M : Mask)
19533 llvm_unreachable("No valid shuffle instruction found!");
19537 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
19539 /// We walk up the chain and look for a combinable shuffle, skipping over
19540 /// shuffles that we could hoist this shuffle's transformation past without
19541 /// altering anything.
19543 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
19545 TargetLowering::DAGCombinerInfo &DCI) {
19546 assert(N.getOpcode() == X86ISD::PSHUFD &&
19547 "Called with something other than an x86 128-bit half shuffle!");
19550 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
19551 // of the shuffles in the chain so that we can form a fresh chain to replace
19553 SmallVector<SDValue, 8> Chain;
19554 SDValue V = N.getOperand(0);
19555 for (; V.hasOneUse(); V = V.getOperand(0)) {
19556 switch (V.getOpcode()) {
19558 return SDValue(); // Nothing combined!
      // Skip bitcasts as we always know the type for the target specific shuffles.
19565 case X86ISD::PSHUFD:
19566 // Found another dword shuffle.
19569 case X86ISD::PSHUFLW:
19570 // Check that the low words (being shuffled) are the identity in the
19571 // dword shuffle, and the high words are self-contained.
19572 if (Mask[0] != 0 || Mask[1] != 1 ||
19573 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
19576 Chain.push_back(V);
19579 case X86ISD::PSHUFHW:
19580 // Check that the high words (being shuffled) are the identity in the
19581 // dword shuffle, and the low words are self-contained.
19582 if (Mask[2] != 2 || Mask[3] != 3 ||
19583 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
19586 Chain.push_back(V);
19589 case X86ISD::UNPCKL:
19590 case X86ISD::UNPCKH:
19591 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
19592 // shuffle into a preceding word shuffle.
19593 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
19596 // Search for a half-shuffle which we can combine with.
19597 unsigned CombineOp =
19598 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
19599 if (V.getOperand(0) != V.getOperand(1) ||
19600 !V->isOnlyUserOf(V.getOperand(0).getNode()))
19602 Chain.push_back(V);
19603 V = V.getOperand(0);
19605 switch (V.getOpcode()) {
19607 return SDValue(); // Nothing to combine.
19609 case X86ISD::PSHUFLW:
19610 case X86ISD::PSHUFHW:
19611 if (V.getOpcode() == CombineOp)
19614 Chain.push_back(V);
19618 V = V.getOperand(0);
19622 } while (V.hasOneUse());
19625 // Break out of the loop if we break out of the switch.
19629 if (!V.hasOneUse())
19630 // We fell out of the loop without finding a viable combining instruction.
19633 // Merge this node's mask and our incoming mask.
19634 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
19635 for (int &M : Mask)
19637 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
19638 getV4X86ShuffleImm8ForMask(Mask, DAG));
19640 // Rebuild the chain around this new shuffle.
19641 while (!Chain.empty()) {
19642 SDValue W = Chain.pop_back_val();
19644 if (V.getValueType() != W.getOperand(0).getValueType())
19645 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
19647 switch (W.getOpcode()) {
19649 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
19651 case X86ISD::UNPCKL:
19652 case X86ISD::UNPCKH:
19653 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
19656 case X86ISD::PSHUFD:
19657 case X86ISD::PSHUFLW:
19658 case X86ISD::PSHUFHW:
19659 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
19663 if (V.getValueType() != N.getValueType())
19664 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
19666 // Return the new chain to replace N.
19670 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
19672 /// We walk up the chain, skipping shuffles of the other half and looking
19673 /// through shuffles which switch halves trying to find a shuffle of the same
19674 /// pair of dwords.
19675 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
19677 TargetLowering::DAGCombinerInfo &DCI) {
19679 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
19680 "Called with something other than an x86 128-bit half shuffle!");
19682 unsigned CombineOpcode = N.getOpcode();
19684 // Walk up a single-use chain looking for a combinable shuffle.
19685 SDValue V = N.getOperand(0);
19686 for (; V.hasOneUse(); V = V.getOperand(0)) {
19687 switch (V.getOpcode()) {
19689 return false; // Nothing combined!
      // Skip bitcasts as we always know the type for the target specific shuffles.
19696 case X86ISD::PSHUFLW:
19697 case X86ISD::PSHUFHW:
19698 if (V.getOpcode() == CombineOpcode)
19701 // Other-half shuffles are no-ops.
19704 // Break out of the loop if we break out of the switch.
19708 if (!V.hasOneUse())
19709 // We fell out of the loop without finding a viable combining instruction.
19712 // Combine away the bottom node as its shuffle will be accumulated into
19713 // a preceding shuffle.
19714 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
19716 // Record the old value.
19719 // Merge this node's mask and our incoming mask (adjusted to account for all
19720 // the pshufd instructions encountered).
19721 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
19722 for (int &M : Mask)
19724 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
19725 getV4X86ShuffleImm8ForMask(Mask, DAG));
19727 // Check that the shuffles didn't cancel each other out. If not, we need to
19728 // combine to the new one.
19730 // Replace the combinable shuffle with the combined one, updating all users
19731 // so that we re-evaluate the chain here.
19732 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
19737 /// \brief Try to combine x86 target specific shuffles.
19738 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
19739 TargetLowering::DAGCombinerInfo &DCI,
19740 const X86Subtarget *Subtarget) {
19742 MVT VT = N.getSimpleValueType();
19743 SmallVector<int, 4> Mask;
19745 switch (N.getOpcode()) {
19746 case X86ISD::PSHUFD:
19747 case X86ISD::PSHUFLW:
19748 case X86ISD::PSHUFHW:
19749 Mask = getPSHUFShuffleMask(N);
19750 assert(Mask.size() == 4);
19756 // Nuke no-op shuffles that show up after combining.
19757 if (isNoopShuffleMask(Mask))
19758 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
19760 // Look for simplifications involving one or two shuffle instructions.
19761 SDValue V = N.getOperand(0);
19762 switch (N.getOpcode()) {
19765 case X86ISD::PSHUFLW:
19766 case X86ISD::PSHUFHW:
19767 assert(VT == MVT::v8i16);
19770 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
19771 return SDValue(); // We combined away this shuffle, so we're done.
19773 // See if this reduces to a PSHUFD which is no more expensive and can
19774 // combine with more operations. Note that it has to at least flip the
19775 // dwords as otherwise it would have been removed as a no-op.
19776 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
19777 int DMask[] = {0, 1, 2, 3};
19778 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
19779 DMask[DOffset + 0] = DOffset + 1;
19780 DMask[DOffset + 1] = DOffset + 0;
19781 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
19782 DCI.AddToWorklist(V.getNode());
19783 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
19784 getV4X86ShuffleImm8ForMask(DMask, DAG));
19785 DCI.AddToWorklist(V.getNode());
19786 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
19789 // Look for shuffle patterns which can be implemented as a single unpack.
19790 // FIXME: This doesn't handle the location of the PSHUFD generically, and
19791 // only works when we have a PSHUFD followed by two half-shuffles.
19792 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
19793 (V.getOpcode() == X86ISD::PSHUFLW ||
19794 V.getOpcode() == X86ISD::PSHUFHW) &&
19795 V.getOpcode() != N.getOpcode() &&
19797 SDValue D = V.getOperand(0);
19798 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
19799 D = D.getOperand(0);
19800 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
19801 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
19802 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
19803 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
19804 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
19806 for (int i = 0; i < 4; ++i) {
19807 WordMask[i + NOffset] = Mask[i] + NOffset;
19808 WordMask[i + VOffset] = VMask[i] + VOffset;
19810 // Map the word mask through the DWord mask.
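        // A word index w lives in dword w/2; after applying the dword shuffle
        // it comes from dword DMask[w/2], keeping its even/odd position within
        // that dword.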
19812 for (int i = 0; i < 8; ++i)
19813 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
19814 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
19815 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
19816 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
19817 std::begin(UnpackLoMask)) ||
19818 std::equal(std::begin(MappedMask), std::end(MappedMask),
19819 std::begin(UnpackHiMask))) {
19820 // We can replace all three shuffles with an unpack.
19821 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
19822 DCI.AddToWorklist(V.getNode());
19823 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
19825 DL, MVT::v8i16, V, V);
19832 case X86ISD::PSHUFD:
19833 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
19842 /// \brief Try to combine a shuffle into a target-specific add-sub node.
19844 /// We combine this directly on the abstract vector shuffle nodes so it is
19845 /// easier to generically match. We also insert dummy vector shuffle nodes for
/// the operands, which explicitly discard the lanes unused by this operation,
/// so that the rest of the combiner can see that those lanes are unused.
19849 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
19851 EVT VT = N->getValueType(0);
19853 // We only handle target-independent shuffles.
19854 // FIXME: It would be easy and harmless to use the target shuffle mask
19855 // extraction tool to support more.
19856 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
19859 auto *SVN = cast<ShuffleVectorSDNode>(N);
19860 ArrayRef<int> Mask = SVN->getMask();
19861 SDValue V1 = N->getOperand(0);
19862 SDValue V2 = N->getOperand(1);
19864 // We require the first shuffle operand to be the SUB node, and the second to
19865 // be the ADD node.
19866 // FIXME: We should support the commuted patterns.
19867 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
19870 // If there are other uses of these operations we can't fold them.
19871 if (!V1->hasOneUse() || !V2->hasOneUse())
19874 // Ensure that both operations have the same operands. Note that we can
19875 // commute the FADD operands.
19876 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
19877 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
19878 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
19881 // We're looking for blends between FADD and FSUB nodes. We insist on these
19882 // nodes being lined up in a specific expected pattern.
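  // ADDSUB subtracts in the even lanes and adds in the odd lanes, so the
  // expected blend takes even elements from the FSUB result (V1) and odd
  // elements from the FADD result (V2).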
19883 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
19884 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
19885 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
19888 // Only specific types are legal at this point, assert so we notice if and
19889 // when these change.
19890 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
19891 VT == MVT::v4f64) &&
19892 "Unknown vector type encountered!");
19894 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
19897 /// PerformShuffleCombine - Performs several different shuffle combines.
19898 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
19899 TargetLowering::DAGCombinerInfo &DCI,
19900 const X86Subtarget *Subtarget) {
19902 SDValue N0 = N->getOperand(0);
19903 SDValue N1 = N->getOperand(1);
19904 EVT VT = N->getValueType(0);
19906 // Don't create instructions with illegal types after legalize types has run.
19907 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19908 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
19911 // If we have legalized the vector types, look for blends of FADD and FSUB
19912 // nodes that we can fuse into an ADDSUB node.
19913 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
19914 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
19917 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
19918 if (Subtarget->hasFp256() && VT.is256BitVector() &&
19919 N->getOpcode() == ISD::VECTOR_SHUFFLE)
19920 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
19922 // During Type Legalization, when promoting illegal vector types,
19923 // the backend might introduce new shuffle dag nodes and bitcasts.
19925 // This code performs the following transformation:
19926 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
19927 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
19929 // We do this only if both the bitcast and the BINOP dag nodes have
19930 // one use. Also, perform this transformation only if the new binary
19931 // operation is legal. This is to avoid introducing dag nodes that
19932 // potentially need to be further expanded (or custom lowered) into a
19933 // less optimal sequence of dag nodes.
19934 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
19935 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
19936 N0.getOpcode() == ISD::BITCAST) {
19937 SDValue BC0 = N0.getOperand(0);
19938 EVT SVT = BC0.getValueType();
19939 unsigned Opcode = BC0.getOpcode();
19940 unsigned NumElts = VT.getVectorNumElements();
19942 if (BC0.hasOneUse() && SVT.isVector() &&
19943 SVT.getVectorNumElements() * 2 == NumElts &&
19944 TLI.isOperationLegal(Opcode, VT)) {
19945 bool CanFold = false;
19957 unsigned SVTNumElts = SVT.getVectorNumElements();
19958 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
19959 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
19960 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
19961 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
19962 CanFold = SVOp->getMaskElt(i) < 0;
19965 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
19966 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
19967 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
19968 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
19973 // Only handle 128-bit wide vectors from here on.
19974 if (!VT.is128BitVector())
19977 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
19978 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
19979 // consecutive, non-overlapping, and in the right order.
19980 SmallVector<SDValue, 16> Elts;
19981 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
19982 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
19984 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
19988 if (isTargetShuffle(N->getOpcode())) {
19990 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
19991 if (Shuffle.getNode())
19994 // Try recursively combining arbitrary sequences of x86 shuffle
19995 // instructions into higher-order shuffles. We do this after combining
19996 // specific PSHUF instruction sequences into their minimal form so that we
19997 // can evaluate how many specialized shuffle instructions are involved in
19998 // a particular chain.
19999 SmallVector<int, 1> NonceMask; // Just a placeholder.
20000 NonceMask.push_back(0);
20001 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
20002 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
20004 return SDValue(); // This routine will use CombineTo to replace N.
20010 /// PerformTruncateCombine - Converts a truncate operation into
20011 /// a sequence of vector shuffle operations.
20012 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
20013 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
20014 TargetLowering::DAGCombinerInfo &DCI,
20015 const X86Subtarget *Subtarget) {
20019 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
20020 /// specific shuffle of a load can be folded into a single element load.
20021 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
20022 /// shuffles have been custom lowered so we need to handle those here.
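/// For example, extracting element 0 from (PSHUFD (load %p), <2, 3, 0, 1>)
/// only needs the scalar stored at %p + 8, so the whole sequence can usually
/// be narrowed to a single element load (an illustrative case; the actual
/// folding is finished by the generic DAG combiner after this rewrite).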
20023 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
20024 TargetLowering::DAGCombinerInfo &DCI) {
20025 if (DCI.isBeforeLegalizeOps())
20028 SDValue InVec = N->getOperand(0);
20029 SDValue EltNo = N->getOperand(1);
20031 if (!isa<ConstantSDNode>(EltNo))
20034 EVT OriginalVT = InVec.getValueType();
20036 if (InVec.getOpcode() == ISD::BITCAST) {
20037 // Don't duplicate a load with other uses.
20038 if (!InVec.hasOneUse())
20040 EVT BCVT = InVec.getOperand(0).getValueType();
20041 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
20043 InVec = InVec.getOperand(0);
20046 EVT CurrentVT = InVec.getValueType();
20048 if (!isTargetShuffle(InVec.getOpcode()))
20051 // Don't duplicate a load with other uses.
20052 if (!InVec.hasOneUse())
20055 SmallVector<int, 16> ShuffleMask;
20057 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
20058 ShuffleMask, UnaryShuffle))
20061 // Select the input vector, guarding against an out-of-range extract index.
20062 unsigned NumElems = CurrentVT.getVectorNumElements();
20063 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
20064 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
20065 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
20066 : InVec.getOperand(1);
20068 // If inputs to shuffle are the same for both ops, then allow 2 uses
20069 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
20070 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
20072 if (LdNode.getOpcode() == ISD::BITCAST) {
20073 // Don't duplicate a load with other uses.
20074 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
20077 AllowedUses = 1; // only allow 1 load use if we have a bitcast
20078 LdNode = LdNode.getOperand(0);
20081 if (!ISD::isNormalLoad(LdNode.getNode()))
20084 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
20086 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
20089 EVT EltVT = N->getValueType(0);
20090 // If there's a bitcast before the shuffle, check if the load type and
20091 // alignment are valid.
20092 unsigned Align = LN0->getAlignment();
20093 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20094 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
20095 EltVT.getTypeForEVT(*DAG.getContext()));
20097 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
20100 // All checks match, so transform back to vector_shuffle so that the DAG
20101 // combiner can finish the job.
20104 // Create the shuffle node, taking into account the case that it's a unary shuffle.
20105 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
20106 : InVec.getOperand(1);
20107 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
20108 InVec.getOperand(0), Shuffle,
20110 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
20111 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
20115 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
20116 /// special and don't usually play with other vector types, it's better to
20117 /// handle them early to be sure we emit efficient code by avoiding
20118 /// store-load conversions.
20119 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
20120 if (N->getValueType(0) != MVT::x86mmx ||
20121 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
20122 N->getOperand(0)->getValueType(0) != MVT::v2i32)
20125 SDValue V = N->getOperand(0);
20126 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
20127 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
20128 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
20129 N->getValueType(0), V.getOperand(0));
20134 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
20135 /// generation and convert it from being a bunch of shuffles and extracts
20136 /// into a somewhat faster sequence. For i686, the best sequence is apparently
20137 /// storing the value and loading scalars back, while for x64 we should
20138 /// use 64-bit extracts and shifts.
20139 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
20140 TargetLowering::DAGCombinerInfo &DCI) {
20141 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
20142 if (NewOp.getNode())
20145 SDValue InputVector = N->getOperand(0);
20147 // Detect mmx to i32 conversion through a v2i32 elt extract.
20148 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
20149 N->getValueType(0) == MVT::i32 &&
20150 InputVector.getValueType() == MVT::v2i32) {
20152 // The bitcast source is a direct mmx result.
20153 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
20154 if (MMXSrc.getValueType() == MVT::x86mmx)
20155 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
20156 N->getValueType(0),
20157 InputVector.getNode()->getOperand(0));
20159 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
20160 SDValue MMXSrcOp = MMXSrc.getOperand(0);
20161 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
20162 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
20163 MMXSrcOp.getOpcode() == ISD::BITCAST &&
20164 MMXSrcOp.getValueType() == MVT::v1i64 &&
20165 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
20166 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
20167 N->getValueType(0),
20168 MMXSrcOp.getOperand(0));
20171 // Only operate on vectors of 4 elements, where the alternative shuffling
20172 // gets to be more expensive.
20173 if (InputVector.getValueType() != MVT::v4i32)
20176 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
20177 // single use which is a sign-extend or zero-extend, and all elements are used.
20179 SmallVector<SDNode *, 4> Uses;
20180 unsigned ExtractedElements = 0;
20181 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
20182 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
20183 if (UI.getUse().getResNo() != InputVector.getResNo())
20186 SDNode *Extract = *UI;
20187 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
20190 if (Extract->getValueType(0) != MVT::i32)
20192 if (!Extract->hasOneUse())
20194 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
20195 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
20197 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
20200 // Record which element was extracted.
20201 ExtractedElements |=
20202 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
20204 Uses.push_back(Extract);
20207 // If not all the elements were used, this may not be worthwhile.
20208 if (ExtractedElements != 15)
20211 // Ok, we've now decided to do the transformation.
20212 // If 64-bit shifts are legal, use the extract-shift sequence,
20213 // otherwise bounce the vector off the cache.
20214 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20216 SDLoc dl(InputVector);
20218 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
20219 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
20220 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
20221 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
20222 DAG.getConstant(0, VecIdxTy));
20223 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
20224 DAG.getConstant(1, VecIdxTy));
20226 SDValue ShAmt = DAG.getConstant(32,
20227 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
20228 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
20229 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
20230 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
20231 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
20232 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
20233 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
20235 // Store the value to a temporary stack slot.
20236 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
20237 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
20238 MachinePointerInfo(), false, false, 0);
20240 EVT ElementType = InputVector.getValueType().getVectorElementType();
20241 unsigned EltSize = ElementType.getSizeInBits() / 8;
20243 // Replace each use (extract) with a load of the appropriate element.
20244 for (unsigned i = 0; i < 4; ++i) {
20245 uint64_t Offset = EltSize * i;
20246 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
20248 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
20249 StackPtr, OffsetVal);
20251 // Load the scalar.
20252 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
20253 ScalarAddr, MachinePointerInfo(),
20254 false, false, false, 0);
20259 // Replace the extracts
20260 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
20261 UE = Uses.end(); UI != UE; ++UI) {
20262 SDNode *Extract = *UI;
20264 SDValue Idx = Extract->getOperand(1);
20265 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
20266 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
20269 // The replacement was made in place; don't return anything.
20273 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
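/// For example, (vselect (setcc X, Y, setult), X, Y) maps to X86ISD::UMIN and
/// (vselect (setcc X, Y, setult), Y, X) maps to X86ISD::UMAX, provided the
/// subtarget supports the corresponding packed min/max instructions.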
20274 static std::pair<unsigned, bool>
20275 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
20276 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
20277 if (!VT.isVector())
20278 return std::make_pair(0, false);
20280 bool NeedSplit = false;
20281 switch (VT.getSimpleVT().SimpleTy) {
20282 default: return std::make_pair(0, false);
20285 if (!Subtarget->hasVLX())
20286 return std::make_pair(0, false);
20290 if (!Subtarget->hasBWI())
20291 return std::make_pair(0, false);
20295 if (!Subtarget->hasAVX512())
20296 return std::make_pair(0, false);
20301 if (!Subtarget->hasAVX2())
20303 if (!Subtarget->hasAVX())
20304 return std::make_pair(0, false);
20309 if (!Subtarget->hasSSE2())
20310 return std::make_pair(0, false);
20313 // SSE2 has only a small subset of the operations.
20314 bool hasUnsigned = Subtarget->hasSSE41() ||
20315 (Subtarget->hasSSE2() && VT == MVT::v16i8);
20316 bool hasSigned = Subtarget->hasSSE41() ||
20317 (Subtarget->hasSSE2() && VT == MVT::v8i16);
20319 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
20322 // Check for x CC y ? x : y.
20323 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
20324 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
20329 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
20332 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
20335 Opc = hasSigned ? X86ISD::SMIN : 0; break;
20338 Opc = hasSigned ? X86ISD::SMAX : 0; break;
20340 // Check for x CC y ? y : x -- a min/max with reversed arms.
20341 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
20342 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
20347 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
20350 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
20353 Opc = hasSigned ? X86ISD::SMAX : 0; break;
20356 Opc = hasSigned ? X86ISD::SMIN : 0; break;
20360 return std::make_pair(Opc, NeedSplit);
20364 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
20365 const X86Subtarget *Subtarget) {
20367 SDValue Cond = N->getOperand(0);
20368 SDValue LHS = N->getOperand(1);
20369 SDValue RHS = N->getOperand(2);
20371 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
20372 SDValue CondSrc = Cond->getOperand(0);
20373 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
20374 Cond = CondSrc->getOperand(0);
20377 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
20380 // A vselect where all conditions and data are constants can be optimized into
20381 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
20382 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
20383 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
20386 unsigned MaskValue = 0;
20387 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
20390 MVT VT = N->getSimpleValueType(0);
20391 unsigned NumElems = VT.getVectorNumElements();
20392 SmallVector<int, 8> ShuffleMask(NumElems, -1);
20393 for (unsigned i = 0; i < NumElems; ++i) {
20394 // Be sure we emit undef where we can.
20395 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
20396 ShuffleMask[i] = -1;
20398 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
20401 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20402 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
20404 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
20407 /// PerformSELECTCombine - Do target-specific DAG combines on SELECT and VSELECT nodes.
20409 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
20410 TargetLowering::DAGCombinerInfo &DCI,
20411 const X86Subtarget *Subtarget) {
20413 SDValue Cond = N->getOperand(0);
20414 // Get the LHS/RHS of the select.
20415 SDValue LHS = N->getOperand(1);
20416 SDValue RHS = N->getOperand(2);
20417 EVT VT = LHS.getValueType();
20418 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20420 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
20421 // instructions match the semantics of the common C idiom x<y?x:y but not
20422 // x<=y?x:y, because of how they handle negative zero (which can be
20423 // ignored in unsafe-math mode).
20424 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
20425 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
20426 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
20427 (Subtarget->hasSSE2() ||
20428 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
20429 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
20431 unsigned Opcode = 0;
20432 // Check for x CC y ? x : y.
20433 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
20434 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
20438 // Converting this to a min would handle NaNs incorrectly, and swapping
20439 // the operands would cause it to handle comparisons between positive
20440 // and negative zero incorrectly.
20441 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
20442 if (!DAG.getTarget().Options.UnsafeFPMath &&
20443 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
20445 std::swap(LHS, RHS);
20447 Opcode = X86ISD::FMIN;
20450 // Converting this to a min would handle comparisons between positive
20451 // and negative zero incorrectly.
20452 if (!DAG.getTarget().Options.UnsafeFPMath &&
20453 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
20455 Opcode = X86ISD::FMIN;
20458 // Converting this to a min would handle both negative zeros and NaNs
20459 // incorrectly, but we can swap the operands to fix both.
20460 std::swap(LHS, RHS);
20464 Opcode = X86ISD::FMIN;
20468 // Converting this to a max would handle comparisons between positive
20469 // and negative zero incorrectly.
20470 if (!DAG.getTarget().Options.UnsafeFPMath &&
20471 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
20473 Opcode = X86ISD::FMAX;
20476 // Converting this to a max would handle NaNs incorrectly, and swapping
20477 // the operands would cause it to handle comparisons between positive
20478 // and negative zero incorrectly.
20479 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
20480 if (!DAG.getTarget().Options.UnsafeFPMath &&
20481 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
20483 std::swap(LHS, RHS);
20485 Opcode = X86ISD::FMAX;
20488 // Converting this to a max would handle both negative zeros and NaNs
20489 // incorrectly, but we can swap the operands to fix both.
20490 std::swap(LHS, RHS);
20494 Opcode = X86ISD::FMAX;
20497 // Check for x CC y ? y : x -- a min/max with reversed arms.
20498 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
20499 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
20503 // Converting this to a min would handle comparisons between positive
20504 // and negative zero incorrectly, and swapping the operands would
20505 // cause it to handle NaNs incorrectly.
20506 if (!DAG.getTarget().Options.UnsafeFPMath &&
20507 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
20508 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
20510 std::swap(LHS, RHS);
20512 Opcode = X86ISD::FMIN;
20515 // Converting this to a min would handle NaNs incorrectly.
20516 if (!DAG.getTarget().Options.UnsafeFPMath &&
20517 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
20519 Opcode = X86ISD::FMIN;
20522 // Converting this to a min would handle both negative zeros and NaNs
20523 // incorrectly, but we can swap the operands to fix both.
20524 std::swap(LHS, RHS);
20528 Opcode = X86ISD::FMIN;
20532 // Converting this to a max would handle NaNs incorrectly.
20533 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
20535 Opcode = X86ISD::FMAX;
20538 // Converting this to a max would handle comparisons between positive
20539 // and negative zero incorrectly, and swapping the operands would
20540 // cause it to handle NaNs incorrectly.
20541 if (!DAG.getTarget().Options.UnsafeFPMath &&
20542 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
20543 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
20545 std::swap(LHS, RHS);
20547 Opcode = X86ISD::FMAX;
20550 // Converting this to a max would handle both negative zeros and NaNs
20551 // incorrectly, but we can swap the operands to fix both.
20552 std::swap(LHS, RHS);
20556 Opcode = X86ISD::FMAX;
20562 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
20565 EVT CondVT = Cond.getValueType();
20566 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
20567 CondVT.getVectorElementType() == MVT::i1) {
20568 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
20569 // lowering on KNL. In this case we convert it to
20570 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
20571 // The same situation for all 128 and 256-bit vectors of i8 and i16.
20572 // Since SKX these selects have a proper lowering.
20573 EVT OpVT = LHS.getValueType();
20574 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
20575 (OpVT.getVectorElementType() == MVT::i8 ||
20576 OpVT.getVectorElementType() == MVT::i16) &&
20577 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
20578 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
20579 DCI.AddToWorklist(Cond.getNode());
20580 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
20583 // If this is a select between two integer constants, try to do some optimizations.
20585 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
20586 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
20587 // Don't do this for crazy integer types.
20588 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
20589 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
20590 // so that TrueC (the true value) is larger than FalseC.
20591 bool NeedsCondInvert = false;
20593 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
20594 // Efficiently invertible.
20595 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
20596 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
20597 isa<ConstantSDNode>(Cond.getOperand(1))))) {
20598 NeedsCondInvert = true;
20599 std::swap(TrueC, FalseC);
20602 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
20603 if (FalseC->getAPIntValue() == 0 &&
20604 TrueC->getAPIntValue().isPowerOf2()) {
20605 if (NeedsCondInvert) // Invert the condition if needed.
20606 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
20607 DAG.getConstant(1, Cond.getValueType()));
20609 // Zero extend the condition if needed.
20610 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
20612 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
20613 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
20614 DAG.getConstant(ShAmt, MVT::i8));
20617 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
20618 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
20619 if (NeedsCondInvert) // Invert the condition if needed.
20620 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
20621 DAG.getConstant(1, Cond.getValueType()));
20623 // Zero extend the condition if needed.
20624 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
20625 FalseC->getValueType(0), Cond);
20626 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
20627 SDValue(FalseC, 0));
20630 // Optimize cases that will turn into an LEA instruction. This requires
20631 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
20632 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
20633 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
20634 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
20636 bool isFastMultiplier = false;
20638 switch ((unsigned char)Diff) {
20640 case 1: // result = add base, cond
20641 case 2: // result = lea base( , cond*2)
20642 case 3: // result = lea base(cond, cond*2)
20643 case 4: // result = lea base( , cond*4)
20644 case 5: // result = lea base(cond, cond*4)
20645 case 8: // result = lea base( , cond*8)
20646 case 9: // result = lea base(cond, cond*8)
20647 isFastMultiplier = true;
20652 if (isFastMultiplier) {
20653 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
20654 if (NeedsCondInvert) // Invert the condition if needed.
20655 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
20656 DAG.getConstant(1, Cond.getValueType()));
20658 // Zero extend the condition if needed.
20659 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
20661 // Scale the condition by the difference.
20663 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
20664 DAG.getConstant(Diff, Cond.getValueType()));
20666 // Add the base if non-zero.
20667 if (FalseC->getAPIntValue() != 0)
20668 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
20669 SDValue(FalseC, 0));
20676 // Canonicalize max and min:
20677 // (x > y) ? x : y -> (x >= y) ? x : y
20678 // (x < y) ? x : y -> (x <= y) ? x : y
20679 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
20680 // the need for an extra compare
20681 // against zero. e.g.
20682 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
20684 // testl %edi, %edi
20686 // cmovgl %edi, %eax
20690 // cmovsl %eax, %edi
20691 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
20692 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
20693 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
20694 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
20699 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
20700 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
20701 Cond.getOperand(0), Cond.getOperand(1), NewCC);
20702 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
20707 // Early exit check
20708 if (!TLI.isTypeLegal(VT))
20711 // Match VSELECTs into subs with unsigned saturation.
20712 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
20713 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
20714 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
20715 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
20716 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
20718 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
20719 // left side, invert the predicate to simplify the logic below.
20721 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
20723 CC = ISD::getSetCCInverse(CC, true);
20724 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
20728 if (Other.getNode() && Other->getNumOperands() == 2 &&
20729 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
20730 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
20731 SDValue CondRHS = Cond->getOperand(1);
20733 // Look for a general sub with unsigned saturation first.
20734 // x >= y ? x-y : 0 --> subus x, y
20735 // x > y ? x-y : 0 --> subus x, y
20736 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
20737 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
20738 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
20740 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
20741 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
20742 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
20743 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
20744 // If the RHS is a constant we have to reverse the const
20745 // canonicalization.
20746 // x > C-1 ? x+-C : 0 --> subus x, C
20747 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
20748 CondRHSConst->getAPIntValue() ==
20749 (-OpRHSConst->getAPIntValue() - 1))
20750 return DAG.getNode(
20751 X86ISD::SUBUS, DL, VT, OpLHS,
20752 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
20754 // Another special case: If C was a sign bit, the sub has been
20755 // canonicalized into a xor.
20756 // FIXME: Would it be better to use computeKnownBits to determine
20757 // whether it's safe to decanonicalize the xor?
20758 // x s< 0 ? x^C : 0 --> subus x, C
20759 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
20760 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
20761 OpRHSConst->getAPIntValue().isSignBit())
20762 // Note that we have to rebuild the RHS constant here to ensure we
20763 // don't rely on particular values of undef lanes.
20764 return DAG.getNode(
20765 X86ISD::SUBUS, DL, VT, OpLHS,
20766 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
20771 // Try to match a min/max vector operation.
20772 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
20773 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
20774 unsigned Opc = ret.first;
20775 bool NeedSplit = ret.second;
20777 if (Opc && NeedSplit) {
20778 unsigned NumElems = VT.getVectorNumElements();
20779 // Extract the LHS vectors
20780 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
20781 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
20783 // Extract the RHS vectors
20784 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
20785 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
20787 // Create min/max for each subvector
20788 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
20789 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
20791 // Merge the result
20792 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
20794 return DAG.getNode(Opc, DL, VT, LHS, RHS);
20797 // Simplify vector selection if the condition value type matches the vselect operand type.
20799 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
20800 assert(Cond.getValueType().isVector() &&
20801 "vector select expects a vector selector!");
20803 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
20804 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
20806 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
20808 if (!TValIsAllOnes && !FValIsAllZeros &&
20809 // Check if the selector will be produced by CMPP*/PCMP*
20810 Cond.getOpcode() == ISD::SETCC &&
20811 // Check if SETCC has already been promoted
20812 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
20813 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
20814 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
20816 if (TValIsAllZeros || FValIsAllOnes) {
20817 SDValue CC = Cond.getOperand(2);
20818 ISD::CondCode NewCC =
20819 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
20820 Cond.getOperand(0).getValueType().isInteger());
20821 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
20822 std::swap(LHS, RHS);
20823 TValIsAllOnes = FValIsAllOnes;
20824 FValIsAllZeros = TValIsAllZeros;
20828 if (TValIsAllOnes || FValIsAllZeros) {
20831 if (TValIsAllOnes && FValIsAllZeros)
20833 else if (TValIsAllOnes)
20834 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
20835 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
20836 else if (FValIsAllZeros)
20837 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
20838 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
20840 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
20844 // If we know that this node is legal then we know that it is going to be
20845 // matched by one of the SSE/AVX BLEND instructions. These instructions only
20846 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
20847 // to simplify previous instructions.
20848 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
20849 !DCI.isBeforeLegalize() &&
20850 // We explicitly check against SSE4.1, v8i16 and v16i16 because, although
20851 // vselect nodes may be marked as Custom, they might only be legal when
20852 // Cond is a build_vector of constants. This will be taken care of in
20853 // a later condition.
20854 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) &&
20855 Subtarget->hasSSE41() && VT != MVT::v16i16 && VT != MVT::v8i16) &&
20856 // Don't optimize vector of constants. Those are handled by
20857 // the generic code and all the bits must be properly set for
20858 // the generic optimizer.
20859 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
20860 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
20862 // Don't optimize vector selects that map to mask-registers.
20866 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
20867 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
20869 APInt KnownZero, KnownOne;
20870 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
20871 DCI.isBeforeLegalizeOps());
20872 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
20873 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
20875 // If we changed the computation somewhere in the DAG, this change
20876 // will affect all users of Cond.
20877 // Make sure it is fine and update all the nodes so that we do not
20878 // use the generic VSELECT anymore. Otherwise, we may perform
20879 // wrong optimizations because we have changed the actual expectation
20880 // for the vector boolean values.
20881 if (Cond != TLO.Old) {
20882 // Check all uses of that condition operand to check whether it will be
20883 // consumed by non-BLEND instructions, which may depend on all bits being set properly.
20885 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
20887 if (I->getOpcode() != ISD::VSELECT)
20888 // TODO: Add other opcodes eventually lowered into BLEND.
20891 // Update all the users of the condition, before committing the change,
20892 // so that the VSELECT optimizations that expect the correct vector
20893 // boolean value will not be triggered.
20894 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
20896 DAG.ReplaceAllUsesOfValueWith(
20898 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
20899 Cond, I->getOperand(1), I->getOperand(2)));
20900 DCI.CommitTargetLoweringOpt(TLO);
20903 // At this point, only Cond is changed. Change the condition
20904 // just for N to keep the opportunity to optimize all other
20905 // users in their own way.
20906 DAG.ReplaceAllUsesOfValueWith(
20908 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
20909 TLO.New, N->getOperand(1), N->getOperand(2)));
20914 // We should generate an X86ISD::BLENDI from a vselect if its argument
20915 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
20916 // constants. This specific pattern gets generated when we split a
20917 // selector for a 512-bit vector on a machine without AVX512 (but with
20918 // 256-bit vectors), during legalization:
20920 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
20922 // Iff we find this pattern and the build_vectors are built from
20923 // constants, we translate the vselect into a shuffle_vector that we
20924 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
20925 if ((N->getOpcode() == ISD::VSELECT ||
20926 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
20927 !DCI.isBeforeLegalize()) {
20928 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
20929 if (Shuffle.getNode())
20936 // Check whether a boolean test is testing a boolean value generated by
20937 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition code.
20940 // Simplify the following patterns:
20941 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
20942 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
20943 // to (Op EFLAGS Cond)
20945 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
20946 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
20947 // to (Op EFLAGS !Cond)
20949 // where Op could be BRCOND or CMOV.
20951 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
20952 // Quit unless this is a CMP, or a SUB whose value result is unused.
20953 if (Cmp.getOpcode() != X86ISD::CMP &&
20954 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
20957 // Quit if not used as a boolean value.
20958 if (CC != X86::COND_E && CC != X86::COND_NE)
20961 // Check CMP operands. One of them should be 0 or 1 and the other should be
20962 // a SetCC or extended from it.
20963 SDValue Op1 = Cmp.getOperand(0);
20964 SDValue Op2 = Cmp.getOperand(1);
20967 const ConstantSDNode* C = nullptr;
20968 bool needOppositeCond = (CC == X86::COND_E);
20969 bool checkAgainstTrue = false; // Is it a comparison against 1?
20971 if ((C = dyn_cast<ConstantSDNode>(Op1)))
20973 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
20975 else // Quit if all operands are not constants.
20978 if (C->getZExtValue() == 1) {
20979 needOppositeCond = !needOppositeCond;
20980 checkAgainstTrue = true;
20981 } else if (C->getZExtValue() != 0)
20982 // Quit if the constant is neither 0 nor 1.
20985 bool truncatedToBoolWithAnd = false;
20986 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
20987 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
20988 SetCC.getOpcode() == ISD::TRUNCATE ||
20989 SetCC.getOpcode() == ISD::AND) {
20990 if (SetCC.getOpcode() == ISD::AND) {
20992 ConstantSDNode *CS;
20993 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
20994 CS->getZExtValue() == 1)
20996 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
20997 CS->getZExtValue() == 1)
21001 SetCC = SetCC.getOperand(OpIdx);
21002 truncatedToBoolWithAnd = true;
21004 SetCC = SetCC.getOperand(0);
21007 switch (SetCC.getOpcode()) {
21008 case X86ISD::SETCC_CARRY:
21009 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
21010 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
21011 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
21012 // truncated to i1 using 'and'.
21013 if (checkAgainstTrue && !truncatedToBoolWithAnd)
21015 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
21016 "Invalid use of SETCC_CARRY!");
21018 case X86ISD::SETCC:
21019 // Set the condition code or opposite one if necessary.
21020 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
21021 if (needOppositeCond)
21022 CC = X86::GetOppositeBranchCondition(CC);
21023 return SetCC.getOperand(1);
21024 case X86ISD::CMOV: {
21025 // Check whether the false/true value has a canonical form, i.e. 0 or 1.
21026 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
21027 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
21028 // Quit if true value is not a constant.
21031 // Quit if false value is not a constant.
21033 SDValue Op = SetCC.getOperand(0);
21034 // Skip 'zext' or 'trunc' node.
21035 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
21036 Op.getOpcode() == ISD::TRUNCATE)
21037 Op = Op.getOperand(0);
21038 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
21040 if ((Op.getOpcode() != X86ISD::RDRAND &&
21041 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
21044 // Quit if false value is not the constant 0 or 1.
21045 bool FValIsFalse = true;
21046 if (FVal && FVal->getZExtValue() != 0) {
21047 if (FVal->getZExtValue() != 1)
21049 // If FVal is 1, opposite cond is needed.
21050 needOppositeCond = !needOppositeCond;
21051 FValIsFalse = false;
21053 // Quit if TVal is not the constant opposite of FVal.
21054 if (FValIsFalse && TVal->getZExtValue() != 1)
21056 if (!FValIsFalse && TVal->getZExtValue() != 0)
21058 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
21059 if (needOppositeCond)
21060 CC = X86::GetOppositeBranchCondition(CC);
21061 return SetCC.getOperand(3);
21068 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
21069 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
21070 TargetLowering::DAGCombinerInfo &DCI,
21071 const X86Subtarget *Subtarget) {
21074 // If the flag operand isn't dead, don't touch this CMOV.
21075 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
21078 SDValue FalseOp = N->getOperand(0);
21079 SDValue TrueOp = N->getOperand(1);
21080 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
21081 SDValue Cond = N->getOperand(3);
21083 if (CC == X86::COND_E || CC == X86::COND_NE) {
21084 switch (Cond.getOpcode()) {
21088 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
21089 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
21090 return (CC == X86::COND_E) ? FalseOp : TrueOp;
21096 Flags = checkBoolTestSetCCCombine(Cond, CC);
21097 if (Flags.getNode() &&
21098 // Extra check as FCMOV only supports a subset of X86 cond.
21099 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
21100 SDValue Ops[] = { FalseOp, TrueOp,
21101 DAG.getConstant(CC, MVT::i8), Flags };
21102 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
21105 // If this is a select between two integer constants, try to do some
21106 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
21108 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
21109 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
21110 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
21111 // larger than FalseC (the false value).
21112 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
21113 CC = X86::GetOppositeBranchCondition(CC);
21114 std::swap(TrueC, FalseC);
21115 std::swap(TrueOp, FalseOp);
21118 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
21119 // This is efficient for any integer data type (including i8/i16) and any shift amount.
21121 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
21122 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
21123 DAG.getConstant(CC, MVT::i8), Cond);
21125 // Zero extend the condition if needed.
21126 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
21128 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
21129 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
21130 DAG.getConstant(ShAmt, MVT::i8));
21131 if (N->getNumValues() == 2) // Dead flag value?
21132 return DCI.CombineTo(N, Cond, SDValue());
21136 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
21137 // for any integer data type, including i8/i16.
21138 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
21139 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
21140 DAG.getConstant(CC, MVT::i8), Cond);
21142 // Zero extend the condition if needed.
21143 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
21144 FalseC->getValueType(0), Cond);
21145 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
21146 SDValue(FalseC, 0));
21148 if (N->getNumValues() == 2) // Dead flag value?
21149 return DCI.CombineTo(N, Cond, SDValue());
21153 // Optimize cases that will turn into an LEA instruction. This requires
21154 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
21155 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
21156 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
21157 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
21159 bool isFastMultiplier = false;
21161 switch ((unsigned char)Diff) {
21163 case 1: // result = add base, cond
21164 case 2: // result = lea base( , cond*2)
21165 case 3: // result = lea base(cond, cond*2)
21166 case 4: // result = lea base( , cond*4)
21167 case 5: // result = lea base(cond, cond*4)
21168 case 8: // result = lea base( , cond*8)
21169 case 9: // result = lea base(cond, cond*8)
21170 isFastMultiplier = true;
21175 if (isFastMultiplier) {
21176 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
21177 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
21178 DAG.getConstant(CC, MVT::i8), Cond);
21179 // Zero extend the condition if needed.
21180 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
21182 // Scale the condition by the difference.
21184 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
21185 DAG.getConstant(Diff, Cond.getValueType()));
21187 // Add the base if non-zero.
21188 if (FalseC->getAPIntValue() != 0)
21189 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
21190 SDValue(FalseC, 0));
21191 if (N->getNumValues() == 2) // Dead flag value?
21192 return DCI.CombineTo(N, Cond, SDValue());
21199 // Handle these cases:
21200 // (select (x != c), e, c) -> (select (x != c), e, x),
21201 // (select (x == c), c, e) -> (select (x == c), x, e)
21202 // where the c is an integer constant, and the "select" is the combination
21203 // of CMOV and CMP.
21205 // The rationale for this change is that a conditional-move from a constant
21206 // needs two instructions, whereas a conditional-move from a register needs
21207 // only one instruction.
21209 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
21210 // some instruction-combining opportunities. This opt needs to be
21211 // postponed as late as possible.
21213 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
21214 // The DCI.xxxx conditions are provided to postpone the optimization as
21215 // late as possible.
21217 ConstantSDNode *CmpAgainst = nullptr;
21218 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
21219 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
21220 !isa<ConstantSDNode>(Cond.getOperand(0))) {
21222 if (CC == X86::COND_NE &&
21223 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
21224 CC = X86::GetOppositeBranchCondition(CC);
21225 std::swap(TrueOp, FalseOp);
21228 if (CC == X86::COND_E &&
21229 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
21230 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
21231 DAG.getConstant(CC, MVT::i8), Cond };
21232 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
21240 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
21241 const X86Subtarget *Subtarget) {
21242 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
21244 default: return SDValue();
21245 // SSE/AVX/AVX2 blend intrinsics.
21246 case Intrinsic::x86_avx2_pblendvb:
21247 case Intrinsic::x86_avx2_pblendw:
21248 case Intrinsic::x86_avx2_pblendd_128:
21249 case Intrinsic::x86_avx2_pblendd_256:
21250 // Don't try to simplify this intrinsic if we don't have AVX2.
21251 if (!Subtarget->hasAVX2())
21254 case Intrinsic::x86_avx_blend_pd_256:
21255 case Intrinsic::x86_avx_blend_ps_256:
21256 case Intrinsic::x86_avx_blendv_pd_256:
21257 case Intrinsic::x86_avx_blendv_ps_256:
21258 // Don't try to simplify this intrinsic if we don't have AVX.
21259 if (!Subtarget->hasAVX())
21262 case Intrinsic::x86_sse41_pblendw:
21263 case Intrinsic::x86_sse41_blendpd:
21264 case Intrinsic::x86_sse41_blendps:
21265 case Intrinsic::x86_sse41_blendvps:
21266 case Intrinsic::x86_sse41_blendvpd:
21267 case Intrinsic::x86_sse41_pblendvb: {
21268 SDValue Op0 = N->getOperand(1);
21269 SDValue Op1 = N->getOperand(2);
21270 SDValue Mask = N->getOperand(3);
21272 // Don't try to simplify this intrinsic if we don't have SSE4.1.
21273 if (!Subtarget->hasSSE41())
21276 // fold (blend A, A, Mask) -> A
21279 // fold (blend A, B, allZeros) -> A
21280 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
21282 // fold (blend A, B, allOnes) -> B
21283 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
21286 // Simplify the case where the mask is a constant i32 value.
21287 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
21288 if (C->isNullValue())
21290 if (C->isAllOnesValue())
21297 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
21298 case Intrinsic::x86_sse2_psrai_w:
21299 case Intrinsic::x86_sse2_psrai_d:
21300 case Intrinsic::x86_avx2_psrai_w:
21301 case Intrinsic::x86_avx2_psrai_d:
21302 case Intrinsic::x86_sse2_psra_w:
21303 case Intrinsic::x86_sse2_psra_d:
21304 case Intrinsic::x86_avx2_psra_w:
21305 case Intrinsic::x86_avx2_psra_d: {
21306 SDValue Op0 = N->getOperand(1);
21307 SDValue Op1 = N->getOperand(2);
21308 EVT VT = Op0.getValueType();
21309 assert(VT.isVector() && "Expected a vector type!");
21311 if (isa<BuildVectorSDNode>(Op1))
21312 Op1 = Op1.getOperand(0);
21314 if (!isa<ConstantSDNode>(Op1))
21317 EVT SVT = VT.getVectorElementType();
21318 unsigned SVTBits = SVT.getSizeInBits();
21320 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
21321 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
21322 uint64_t ShAmt = C.getZExtValue();
21324 // Don't try to convert this shift into an ISD::SRA if the shift
21325 // count is bigger than or equal to the element size.
21326 if (ShAmt >= SVTBits)
21329 // Trivial case: if the shift count is zero, then fold this
21330 // into the first operand.
21334 // Replace this packed shift intrinsic with a target independent shift DAG node.
21336 SDValue Splat = DAG.getConstant(C, VT);
21337 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
21342 /// PerformMulCombine - Optimize a single multiply with a constant into two
21343 /// operations in order to implement it with two cheaper instructions, e.g.
21344 /// LEA + SHL, LEA + LEA.
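/// For example, a multiply by 45 can be rewritten as (X * 9) * 5, where each
/// factor maps to a single LEA, and a multiply by 24 as (X * 3) << 3, i.e.
/// LEA + SHL (illustrative decompositions; the code below picks the factors).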
21345 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
21346 TargetLowering::DAGCombinerInfo &DCI) {
21347 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
21350 EVT VT = N->getValueType(0);
21351 if (VT != MVT::i64 && VT != MVT::i32)
21354 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
21357 uint64_t MulAmt = C->getZExtValue();
21358 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
21361 uint64_t MulAmt1 = 0;
21362 uint64_t MulAmt2 = 0;
21363 if ((MulAmt % 9) == 0) {
21365 MulAmt2 = MulAmt / 9;
21366 } else if ((MulAmt % 5) == 0) {
21368 MulAmt2 = MulAmt / 5;
21369 } else if ((MulAmt % 3) == 0) {
21371 MulAmt2 = MulAmt / 3;
21374 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
21377 if (isPowerOf2_64(MulAmt2) &&
21378 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
21379 // If the second multiplier is pow2, issue it first. We want the multiply by
21380 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
21382 std::swap(MulAmt1, MulAmt2);
21385 if (isPowerOf2_64(MulAmt1))
21386 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
21387 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
21389 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
21390 DAG.getConstant(MulAmt1, VT));
21392 if (isPowerOf2_64(MulAmt2))
21393 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
21394 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
21396 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
21397 DAG.getConstant(MulAmt2, VT));
21399 // Do not add new nodes to DAG combiner worklist.
21400 DCI.CombineTo(N, NewMul, false);
21405 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
21406 SDValue N0 = N->getOperand(0);
21407 SDValue N1 = N->getOperand(1);
21408 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
21409 EVT VT = N0.getValueType();
21411 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
21412 // since the result of setcc_c is all zeros or all ones.
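// For example (illustrative): (shl (and (setcc_carry ...), 1), 3) becomes
// (and (setcc_carry ...), 8), because the setcc_carry value is either 0 or
// all ones, so masking with the shifted constant gives the same result.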
21413 if (VT.isInteger() && !VT.isVector() &&
21414 N1C && N0.getOpcode() == ISD::AND &&
21415 N0.getOperand(1).getOpcode() == ISD::Constant) {
21416 SDValue N00 = N0.getOperand(0);
21417 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
21418 ((N00.getOpcode() == ISD::ANY_EXTEND ||
21419 N00.getOpcode() == ISD::ZERO_EXTEND) &&
21420 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
21421 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
21422 APInt ShAmt = N1C->getAPIntValue();
21423 Mask = Mask.shl(ShAmt);
21425 return DAG.getNode(ISD::AND, SDLoc(N), VT,
21426 N00, DAG.getConstant(Mask, VT));
21430 // Hardware support for vector shifts is sparse, which makes us scalarize the
21431 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
21433 // (shl V, 1) -> add V,V
21434 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
21435 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
21436 assert(N0.getValueType().isVector() && "Invalid vector shift type");
21437 // We shift all of the values by one. In many cases we do not have
21438 // hardware support for this operation. This is better expressed as an ADD of two values.
21440 if (N1SplatC->getZExtValue() == 1)
21441 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
21447 /// \brief Returns a vector of 0s if the input node is a vector logical
21448 /// shift by a constant amount which is known to be bigger than or equal
21449 /// to the vector element size in bits.
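/// For example, (srl (v4i32 X), (v4i32 splat 32)) always produces zeroes,
/// since each 32-bit lane is logically shifted by at least its full width
/// (an illustrative case; the same holds for the other supported types).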
21450 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
21451 const X86Subtarget *Subtarget) {
21452 EVT VT = N->getValueType(0);
21454 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
21455 (!Subtarget->hasInt256() ||
21456 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
21459 SDValue Amt = N->getOperand(1);
21461 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
21462 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
21463 APInt ShiftAmt = AmtSplat->getAPIntValue();
21464 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
21466 // SSE2/AVX2 logical shifts always return a vector of 0s
21467 // if the shift amount is bigger than or equal to
21468 // the element size. The constant shift amount will be
21469 // encoded as an 8-bit immediate.
21470 if (ShiftAmt.trunc(8).uge(MaxAmount))
21471 return getZeroVector(VT, Subtarget, DAG, DL);
21477 /// PerformShiftCombine - Combine shifts.
21478 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
21479 TargetLowering::DAGCombinerInfo &DCI,
21480 const X86Subtarget *Subtarget) {
21481 if (N->getOpcode() == ISD::SHL) {
21482 SDValue V = PerformSHLCombine(N, DAG);
21483 if (V.getNode()) return V;
21486 if (N->getOpcode() != ISD::SRA) {
21487 // Try to fold this logical shift into a zero vector.
21488 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
21489 if (V.getNode()) return V;
21495 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
21496 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
21497 // and friends. Likewise for OR -> CMPNEQSS.
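// For example (illustrative), for scalar f32 operands A and B:
//   (and (setcc COND_E  (X86ISD::CMP A, B)),
//        (setcc COND_NP (X86ISD::CMP A, B)))
// tests "equal and not unordered", which matches CMPEQSS, so it can be
// rewritten to extract a single bit from the CMPEQSS mask instead of
// combining two EFLAGS-based setccs.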
21498 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
21499 TargetLowering::DAGCombinerInfo &DCI,
21500 const X86Subtarget *Subtarget) {
21503 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
21504 // we're requiring SSE2 for both.
21505 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
21506 SDValue N0 = N->getOperand(0);
21507 SDValue N1 = N->getOperand(1);
21508 SDValue CMP0 = N0->getOperand(1);
21509 SDValue CMP1 = N1->getOperand(1);
21512 // The SETCCs should both refer to the same CMP.
21513 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
21516 SDValue CMP00 = CMP0->getOperand(0);
21517 SDValue CMP01 = CMP0->getOperand(1);
21518 EVT VT = CMP00.getValueType();
21520 if (VT == MVT::f32 || VT == MVT::f64) {
21521 bool ExpectingFlags = false;
21522 // Check for any users that want flags:
21523 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
21524 !ExpectingFlags && UI != UE; ++UI)
21525 switch (UI->getOpcode()) {
21530 ExpectingFlags = true;
21532 case ISD::CopyToReg:
21533 case ISD::SIGN_EXTEND:
21534 case ISD::ZERO_EXTEND:
21535 case ISD::ANY_EXTEND:
21539 if (!ExpectingFlags) {
21540 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
21541 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
21543 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
21544 X86::CondCode tmp = cc0;
21549 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
21550 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
21551 // FIXME: need symbolic constants for these magic numbers.
21552 // See X86ATTInstPrinter.cpp:printSSECC().
21553 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
21554 if (Subtarget->hasAVX512()) {
21555 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
21556 CMP01, DAG.getConstant(x86cc, MVT::i8));
21557 if (N->getValueType(0) != MVT::i1)
21558 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
21562 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
21563 CMP00.getValueType(), CMP00, CMP01,
21564 DAG.getConstant(x86cc, MVT::i8));
21566 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
21567 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
21569 if (is64BitFP && !Subtarget->is64Bit()) {
21570 // On a 32-bit target, we cannot bitcast the 64-bit float to a
21571 // 64-bit integer, since that's not a legal type. Since
21572 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
21573 // bits, but can do this little dance to extract the lowest 32 bits
21574 // and work with those going forward.
21575 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
21577 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
21579 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
21580 Vector32, DAG.getIntPtrConstant(0));
21584 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
21585 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
21586 DAG.getConstant(1, IntVT));
21587 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
21588 return OneBitOfTruth;
21596 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
21597 /// so it can be folded inside ANDNP.
21598 static bool CanFoldXORWithAllOnes(const SDNode *N) {
21599 EVT VT = N->getValueType(0);
21601 // Match direct AllOnes for 128 and 256-bit vectors
21602 if (ISD::isBuildVectorAllOnes(N))
21605 // Look through a bit convert.
21606 if (N->getOpcode() == ISD::BITCAST)
21607 N = N->getOperand(0).getNode();
21609 // Sometimes the operand may come from an insert_subvector building a 256-bit all-ones vector.
21611 if (VT.is256BitVector() &&
21612 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
21613 SDValue V1 = N->getOperand(0);
21614 SDValue V2 = N->getOperand(1);
21616 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
21617 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
21618 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
21619 ISD::isBuildVectorAllOnes(V2.getNode()))
21626 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
21627 // register. In most cases we actually compare or select YMM-sized registers
21628 // and mixing the two types creates horrible code. This method optimizes
21629 // some of the transition sequences.
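// e.g. (v16i16 zext (v16i8 and (trunc A), (trunc B))) can become
// (v16i16 and A, B) masked down to the low 8 bits of each element, keeping
// the whole computation in YMM-sized registers.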
21630 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
21631 TargetLowering::DAGCombinerInfo &DCI,
21632 const X86Subtarget *Subtarget) {
21633 EVT VT = N->getValueType(0);
21634 if (!VT.is256BitVector())
21637 assert((N->getOpcode() == ISD::ANY_EXTEND ||
21638 N->getOpcode() == ISD::ZERO_EXTEND ||
21639 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
21641 SDValue Narrow = N->getOperand(0);
21642 EVT NarrowVT = Narrow->getValueType(0);
21643 if (!NarrowVT.is128BitVector())
21646 if (Narrow->getOpcode() != ISD::XOR &&
21647 Narrow->getOpcode() != ISD::AND &&
21648 Narrow->getOpcode() != ISD::OR)
21651 SDValue N0 = Narrow->getOperand(0);
21652 SDValue N1 = Narrow->getOperand(1);
21655 // The left side has to be a 'trunc'.
21656 if (N0.getOpcode() != ISD::TRUNCATE)
21659 // The type of the truncated inputs.
21660 EVT WideVT = N0->getOperand(0)->getValueType(0);
21664 // The right side has to be a 'trunc' or a constant vector.
21665 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
21666 ConstantSDNode *RHSConstSplat = nullptr;
21667 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
21668 RHSConstSplat = RHSBV->getConstantSplatNode();
21669 if (!RHSTrunc && !RHSConstSplat)
21672 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21674 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
21677 // Set N0 and N1 to hold the inputs to the new wide operation.
21678 N0 = N0->getOperand(0);
21679 if (RHSConstSplat) {
21680 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
21681 SDValue(RHSConstSplat, 0));
21682 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
21683 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
21684 } else if (RHSTrunc) {
21685 N1 = N1->getOperand(0);
21688 // Generate the wide operation.
21689 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
21690 unsigned Opcode = N->getOpcode();
21692 case ISD::ANY_EXTEND:
21694 case ISD::ZERO_EXTEND: {
21695 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
21696 APInt Mask = APInt::getAllOnesValue(InBits);
21697 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
21698 return DAG.getNode(ISD::AND, DL, VT,
21699 Op, DAG.getConstant(Mask, VT));
21701 case ISD::SIGN_EXTEND:
21702 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
21703 Op, DAG.getValueType(NarrowVT));
21705 llvm_unreachable("Unexpected opcode");
21709 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
21710 TargetLowering::DAGCombinerInfo &DCI,
21711 const X86Subtarget *Subtarget) {
21712 SDValue N0 = N->getOperand(0);
21713 SDValue N1 = N->getOperand(1);
21716 // A vector zext_in_reg may be represented as a shuffle,
21717 // feeding into a bitcast (this represents anyext) feeding into
21718 // an AND with a mask.
21719 // We'd like to try to combine that into a shuffle with zero
21720 // plus a bitcast, removing the AND.
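// e.g. with an i8 source type and an i32 result type (ZextRatio == 4):
//   (and (bitcast (shuffle X, undef, <0,u,u,u,1,u,u,u,...>)), splat(0xFF))
// becomes
//   (bitcast (shuffle X, zero, <0,N,N,N,1,N,N,N,...>))  with N = NumElems.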
21721 if (N0.getOpcode() != ISD::BITCAST ||
21722 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
21725 // The other side of the AND should be a splat of 2^C - 1, where C
21726 // is the number of bits in the source element type.
21727 if (N1.getOpcode() == ISD::BITCAST)
21728 N1 = N1.getOperand(0);
21729 if (N1.getOpcode() != ISD::BUILD_VECTOR)
21731 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
21733 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
21734 EVT SrcType = Shuffle->getValueType(0);
21736 // We expect a single-source shuffle
21737 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
21740 unsigned SrcSize = SrcType.getScalarSizeInBits();
21742 APInt SplatValue, SplatUndef;
21743 unsigned SplatBitSize;
21745 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
21746 SplatBitSize, HasAnyUndefs))
21749 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
21750 // Make sure the splat matches the mask we expect
21751 if (SplatBitSize > ResSize ||
21752 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
21755 // Make sure the input and output sizes make sense
21756 if (SrcSize >= ResSize || ResSize % SrcSize)
21759 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
21760 // The number of u's between consecutive values depends on the ratio between
21761 // the source and dest types.
21762 unsigned ZextRatio = ResSize / SrcSize;
21763 bool IsZext = true;
21764 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
21765 if (i % ZextRatio) {
21766 if (Shuffle->getMaskElt(i) > 0) {
21772 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
21773 // Expected element number
21783 // Ok, perform the transformation - replace the shuffle with
21784 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
21785 // (instead of undef) where the k elements come from the zero vector.
21786 SmallVector<int, 8> Mask;
21787 unsigned NumElems = SrcType.getVectorNumElements();
21788 for (unsigned i = 0; i < NumElems; ++i)
21790 Mask.push_back(NumElems);
21792 Mask.push_back(i / ZextRatio);
21794 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
21795 Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
21796 return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
21799 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
21800 TargetLowering::DAGCombinerInfo &DCI,
21801 const X86Subtarget *Subtarget) {
21802 if (DCI.isBeforeLegalizeOps())
21805 SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
21806 if (Zext.getNode())
21809 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
21813 EVT VT = N->getValueType(0);
21814 SDValue N0 = N->getOperand(0);
21815 SDValue N1 = N->getOperand(1);
21818 // Create BEXTR instructions
21819 // BEXTR is ((X >> imm) & (2**size-1))
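// e.g. (and (srl X, 8), 0xFF) becomes (X86ISD::BEXTR X, 0x0808): the low
// byte of the control is the start bit (8) and the next byte is the length
// (8), i.e. Shift | (MaskSize << 8) as computed below.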
21820 if (VT == MVT::i32 || VT == MVT::i64) {
21821 // Check for BEXTR.
21822 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
21823 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
21824 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
21825 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
21826 if (MaskNode && ShiftNode) {
21827 uint64_t Mask = MaskNode->getZExtValue();
21828 uint64_t Shift = ShiftNode->getZExtValue();
21829 if (isMask_64(Mask)) {
21830 uint64_t MaskSize = countPopulation(Mask);
21831 if (Shift + MaskSize <= VT.getSizeInBits())
21832 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
21833 DAG.getConstant(Shift | (MaskSize << 8), VT));
21841 // Want to form ANDNP nodes:
21842 // 1) In the hopes of then easily combining them with OR and AND nodes
21843 // to form PBLEND/PSIGN.
21844 // 2) To match ANDN packed intrinsics
21845 if (VT != MVT::v2i64 && VT != MVT::v4i64)
21848 // Check LHS for vnot
21849 if (N0.getOpcode() == ISD::XOR &&
21850 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
21851 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
21852 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
21854 // Check RHS for vnot
21855 if (N1.getOpcode() == ISD::XOR &&
21856 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
21857 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
21858 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
21863 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
21864 TargetLowering::DAGCombinerInfo &DCI,
21865 const X86Subtarget *Subtarget) {
21866 if (DCI.isBeforeLegalizeOps())
21869 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
21873 SDValue N0 = N->getOperand(0);
21874 SDValue N1 = N->getOperand(1);
21875 EVT VT = N->getValueType(0);
21877 // look for psign/blend
21878 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
21879 if (!Subtarget->hasSSSE3() ||
21880 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
21883 // Canonicalize pandn to RHS
21884 if (N0.getOpcode() == X86ISD::ANDNP)
21886 // or (and (m, y), (pandn m, x))
21887 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
21888 SDValue Mask = N1.getOperand(0);
21889 SDValue X = N1.getOperand(1);
21891 if (N0.getOperand(0) == Mask)
21892 Y = N0.getOperand(1);
21893 if (N0.getOperand(1) == Mask)
21894 Y = N0.getOperand(0);
21896 // Check to see if the mask appeared in both the AND and the ANDNP.
21900 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
21901 // Look through mask bitcast.
21902 if (Mask.getOpcode() == ISD::BITCAST)
21903 Mask = Mask.getOperand(0);
21904 if (X.getOpcode() == ISD::BITCAST)
21905 X = X.getOperand(0);
21906 if (Y.getOpcode() == ISD::BITCAST)
21907 Y = Y.getOperand(0);
21909 EVT MaskVT = Mask.getValueType();
21911 // Validate that the Mask operand is a vector sra node.
21912 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
21913 // there is no psrai.b
21914 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
21915 unsigned SraAmt = ~0;
21916 if (Mask.getOpcode() == ISD::SRA) {
21917 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
21918 if (auto *AmtConst = AmtBV->getConstantSplatNode())
21919 SraAmt = AmtConst->getZExtValue();
21920 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
21921 SDValue SraC = Mask.getOperand(1);
21922 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
21924 if ((SraAmt + 1) != EltBits)
21929 // Now we know we at least have a pblendvb with the mask value. See if
21930 // we can form a psignb/w/d.
21931 // psign = x.type == y.type == mask.type && y = sub(0, x);
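// Here Mask is (sra A, EltBits-1), i.e. the sign of A broadcast across each
// element, so the OR of the AND/ANDNP pair picks -X for elements where A is
// negative and X otherwise.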
21932 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
21933 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
21934 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
21935 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
21936 "Unsupported VT for PSIGN");
21937 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
21938 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
21940 // PBLENDVB is only available on SSE 4.1.
21941 if (!Subtarget->hasSSE41())
21944 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
21946 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
21947 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
21948 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
21949 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
21950 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
21954 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
21957 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
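// The mirrored pattern (or (x << (64 - c)) | (y >> c)) is also matched and
// emitted as SHRD with the operands swapped (see the ISD::SUB check below).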
21958 MachineFunction &MF = DAG.getMachineFunction();
21960 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
21962 // SHLD/SHRD instructions have lower register pressure, but on some
21963 // platforms they have higher latency than the equivalent
21964 // series of shifts/or that would otherwise be generated.
21965 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
21966 // have higher latencies and we are not optimizing for size.
21967 if (!OptForSize && Subtarget->isSHLDSlow())
21970 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
21972 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
21974 if (!N0.hasOneUse() || !N1.hasOneUse())
21977 SDValue ShAmt0 = N0.getOperand(1);
21978 if (ShAmt0.getValueType() != MVT::i8)
21980 SDValue ShAmt1 = N1.getOperand(1);
21981 if (ShAmt1.getValueType() != MVT::i8)
21983 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
21984 ShAmt0 = ShAmt0.getOperand(0);
21985 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
21986 ShAmt1 = ShAmt1.getOperand(0);
21989 unsigned Opc = X86ISD::SHLD;
21990 SDValue Op0 = N0.getOperand(0);
21991 SDValue Op1 = N1.getOperand(0);
21992 if (ShAmt0.getOpcode() == ISD::SUB) {
21993 Opc = X86ISD::SHRD;
21994 std::swap(Op0, Op1);
21995 std::swap(ShAmt0, ShAmt1);
21998 unsigned Bits = VT.getSizeInBits();
21999 if (ShAmt1.getOpcode() == ISD::SUB) {
22000 SDValue Sum = ShAmt1.getOperand(0);
22001 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
22002 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
22003 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
22004 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
22005 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
22006 return DAG.getNode(Opc, DL, VT,
22008 DAG.getNode(ISD::TRUNCATE, DL,
22011 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
22012 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
22014 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
22015 return DAG.getNode(Opc, DL, VT,
22016 N0.getOperand(0), N1.getOperand(0),
22017 DAG.getNode(ISD::TRUNCATE, DL,
22024 // Generate NEG and CMOV for integer abs.
22025 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
22026 EVT VT = N->getValueType(0);
22028 // Since X86 does not have CMOV for 8-bit integer, we don't convert
22029 // 8-bit integer abs to NEG and CMOV.
22030 if (VT.isInteger() && VT.getSizeInBits() == 8)
22033 SDValue N0 = N->getOperand(0);
22034 SDValue N1 = N->getOperand(1);
22037 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
22038 // and change it to SUB and CMOV.
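// This is the classic branchless abs(): with Y = (X >>s width-1),
// (X + Y) ^ Y == |X|.  Below it is rewritten as a NEG (SUB 0, X) that sets
// EFLAGS, followed by a CMOV that selects X or -X.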
22039 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
22040 N0.getOpcode() == ISD::ADD &&
22041 N0.getOperand(1) == N1 &&
22042 N1.getOpcode() == ISD::SRA &&
22043 N1.getOperand(0) == N0.getOperand(0))
22044 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
22045 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
22046 // Generate SUB & CMOV.
22047 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
22048 DAG.getConstant(0, VT), N0.getOperand(0));
22050 SDValue Ops[] = { N0.getOperand(0), Neg,
22051 DAG.getConstant(X86::COND_GE, MVT::i8),
22052 SDValue(Neg.getNode(), 1) };
22053 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
22058 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
22059 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
22060 TargetLowering::DAGCombinerInfo &DCI,
22061 const X86Subtarget *Subtarget) {
22062 if (DCI.isBeforeLegalizeOps())
22065 if (Subtarget->hasCMov()) {
22066 SDValue RV = performIntegerAbsCombine(N, DAG);
22074 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
22075 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
22076 TargetLowering::DAGCombinerInfo &DCI,
22077 const X86Subtarget *Subtarget) {
22078 LoadSDNode *Ld = cast<LoadSDNode>(N);
22079 EVT RegVT = Ld->getValueType(0);
22080 EVT MemVT = Ld->getMemoryVT();
22082 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22084 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
22085 // into two 16-byte operations.
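// e.g. a 256-bit load whose alignment is known to be below 32 bytes becomes
// two half-width loads at Ptr and Ptr+16, chained by a TokenFactor and
// reassembled with insert_subvector.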
22086 ISD::LoadExtType Ext = Ld->getExtensionType();
22087 unsigned Alignment = Ld->getAlignment();
22088 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
22089 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
22090 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
22091 unsigned NumElems = RegVT.getVectorNumElements();
22095 SDValue Ptr = Ld->getBasePtr();
22096 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
22098 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
22100 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
22101 Ld->getPointerInfo(), Ld->isVolatile(),
22102 Ld->isNonTemporal(), Ld->isInvariant(),
22104 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
22105 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
22106 Ld->getPointerInfo(), Ld->isVolatile(),
22107 Ld->isNonTemporal(), Ld->isInvariant(),
22108 std::min(16U, Alignment));
22109 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22111 Load2.getValue(1));
22113 SDValue NewVec = DAG.getUNDEF(RegVT);
22114 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
22115 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
22116 return DCI.CombineTo(N, NewVec, TF, true);
22122 /// PerformMLOADCombine - Resolve extending loads
22123 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
22124 TargetLowering::DAGCombinerInfo &DCI,
22125 const X86Subtarget *Subtarget) {
22126 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
22127 if (Mld->getExtensionType() != ISD::SEXTLOAD)
22130 EVT VT = Mld->getValueType(0);
22131 unsigned NumElems = VT.getVectorNumElements();
22132 EVT LdVT = Mld->getMemoryVT();
22135 assert(LdVT != VT && "Cannot extend to the same type");
22136 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
22137 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
22138 // From, To sizes and ElemCount must be pow of two
22139 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
22140 "Unexpected size for extending masked load");
22142 unsigned SizeRatio = ToSz / FromSz;
22143 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
22145 // Create a type on which we perform the shuffle
22146 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
22147 LdVT.getScalarType(), NumElems*SizeRatio);
22148 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
22150 // Convert Src0 value
22151 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
22152 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
22153 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
22154 for (unsigned i = 0; i != NumElems; ++i)
22155 ShuffleVec[i] = i * SizeRatio;
22157 // Can't shuffle using an illegal type.
22158 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
22159 && "WideVecVT should be legal");
22160 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
22161 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
22163 // Prepare the new mask
22165 SDValue Mask = Mld->getMask();
22166 if (Mask.getValueType() == VT) {
22167 // Mask and original value have the same type
22168 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
22169 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
22170 for (unsigned i = 0; i != NumElems; ++i)
22171 ShuffleVec[i] = i * SizeRatio;
22172 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
22173 ShuffleVec[i] = NumElems*SizeRatio;
22174 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
22175 DAG.getConstant(0, WideVecVT),
22179 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
22180 unsigned WidenNumElts = NumElems*SizeRatio;
22181 unsigned MaskNumElts = VT.getVectorNumElements();
22182 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
22185 unsigned NumConcat = WidenNumElts / MaskNumElts;
22186 SmallVector<SDValue, 16> Ops(NumConcat);
22187 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
22189 for (unsigned i = 1; i != NumConcat; ++i)
22192 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
22195 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
22196 Mld->getBasePtr(), NewMask, WideSrc0,
22197 Mld->getMemoryVT(), Mld->getMemOperand(),
22199 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
22200 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
22203 /// PerformMSTORECombine - Resolve truncating stores
22204 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
22205 const X86Subtarget *Subtarget) {
22206 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
22207 if (!Mst->isTruncatingStore())
22210 EVT VT = Mst->getValue().getValueType();
22211 unsigned NumElems = VT.getVectorNumElements();
22212 EVT StVT = Mst->getMemoryVT();
22215 assert(StVT != VT && "Cannot truncate to the same type");
22216 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
22217 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
22219 // From, To sizes and ElemCount must be pow of two
22220 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
22221 "Unexpected size for truncating masked store");
22222 // We are going to use the original vector elt for storing.
22223 // Accumulated smaller vector elements must be a multiple of the store size.
22224 assert (((NumElems * FromSz) % ToSz) == 0 &&
22225 "Unexpected ratio for truncating masked store");
22227 unsigned SizeRatio = FromSz / ToSz;
22228 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
22230 // Create a type on which we perform the shuffle
22231 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
22232 StVT.getScalarType(), NumElems*SizeRatio);
22234 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
22236 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
22237 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
22238 for (unsigned i = 0; i != NumElems; ++i)
22239 ShuffleVec[i] = i * SizeRatio;
22241 // Can't shuffle using an illegal type.
22242 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
22243 && "WideVecVT should be legal");
22245 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
22246 DAG.getUNDEF(WideVecVT),
22250 SDValue Mask = Mst->getMask();
22251 if (Mask.getValueType() == VT) {
22252 // Mask and original value have the same type
22253 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
22254 for (unsigned i = 0; i != NumElems; ++i)
22255 ShuffleVec[i] = i * SizeRatio;
22256 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
22257 ShuffleVec[i] = NumElems*SizeRatio;
22258 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
22259 DAG.getConstant(0, WideVecVT),
22263 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
22264 unsigned WidenNumElts = NumElems*SizeRatio;
22265 unsigned MaskNumElts = VT.getVectorNumElements();
22266 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
22269 unsigned NumConcat = WidenNumElts / MaskNumElts;
22270 SmallVector<SDValue, 16> Ops(NumConcat);
22271 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
22273 for (unsigned i = 1; i != NumConcat; ++i)
22276 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
22279 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
22280 NewMask, StVT, Mst->getMemOperand(), false);
22282 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
22283 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
22284 const X86Subtarget *Subtarget) {
22285 StoreSDNode *St = cast<StoreSDNode>(N);
22286 EVT VT = St->getValue().getValueType();
22287 EVT StVT = St->getMemoryVT();
22289 SDValue StoredVal = St->getOperand(1);
22290 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22292 // If we are saving a concatenation of two XMM registers and 32-byte stores
22293 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
22294 unsigned Alignment = St->getAlignment();
22295 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
22296 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
22297 StVT == VT && !IsAligned) {
22298 unsigned NumElems = VT.getVectorNumElements();
22302 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
22303 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
22305 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
22306 SDValue Ptr0 = St->getBasePtr();
22307 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
22309 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
22310 St->getPointerInfo(), St->isVolatile(),
22311 St->isNonTemporal(), Alignment);
22312 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
22313 St->getPointerInfo(), St->isVolatile(),
22314 St->isNonTemporal(),
22315 std::min(16U, Alignment));
22316 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
22319 // Optimize trunc store (of multiple scalars) to shuffle and store.
22320 // First, pack all of the elements in one place. Next, store to memory
22321 // in fewer chunks.
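// e.g. a v8i16 -> v8i8 truncating store becomes a byte shuffle that packs
// the eight truncated bytes into the low half of a v16i8, which is then
// stored using the widest legal scalar store type found below.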
22322 if (St->isTruncatingStore() && VT.isVector()) {
22323 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22324 unsigned NumElems = VT.getVectorNumElements();
22325 assert(StVT != VT && "Cannot truncate to the same type");
22326 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
22327 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
22329 // From, To sizes and ElemCount must be pow of two
22330 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
22331 // We are going to use the original vector elt for storing.
22332 // Accumulated smaller vector elements must be a multiple of the store size.
22333 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
22335 unsigned SizeRatio = FromSz / ToSz;
22337 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
22339 // Create a type on which we perform the shuffle
22340 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
22341 StVT.getScalarType(), NumElems*SizeRatio);
22343 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
22345 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
22346 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
22347 for (unsigned i = 0; i != NumElems; ++i)
22348 ShuffleVec[i] = i * SizeRatio;
22350 // Can't shuffle using an illegal type.
22351 if (!TLI.isTypeLegal(WideVecVT))
22354 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
22355 DAG.getUNDEF(WideVecVT),
22357 // At this point all of the data is stored at the bottom of the
22358 // register. We now need to save it to mem.
22360 // Find the largest store unit
22361 MVT StoreType = MVT::i8;
22362 for (MVT Tp : MVT::integer_valuetypes()) {
22363 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
22367 // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
22368 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
22369 (64 <= NumElems * ToSz))
22370 StoreType = MVT::f64;
22372 // Bitcast the original vector into a vector of store-size units
22373 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
22374 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
22375 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
22376 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
22377 SmallVector<SDValue, 8> Chains;
22378 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
22379 TLI.getPointerTy());
22380 SDValue Ptr = St->getBasePtr();
22382 // Perform one or more big stores into memory.
22383 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
22384 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
22385 StoreType, ShuffWide,
22386 DAG.getIntPtrConstant(i));
22387 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
22388 St->getPointerInfo(), St->isVolatile(),
22389 St->isNonTemporal(), St->getAlignment());
22390 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
22391 Chains.push_back(Ch);
22394 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
22397 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
22398 // the FP state in cases where an emms may be missing.
22399 // A preferable solution to the general problem is to figure out the right
22400 // places to insert EMMS. This qualifies as a quick hack.
22402 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
22403 if (VT.getSizeInBits() != 64)
22406 const Function *F = DAG.getMachineFunction().getFunction();
22407 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
22408 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
22409 && Subtarget->hasSSE2();
22410 if ((VT.isVector() ||
22411 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
22412 isa<LoadSDNode>(St->getValue()) &&
22413 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
22414 St->getChain().hasOneUse() && !St->isVolatile()) {
22415 SDNode* LdVal = St->getValue().getNode();
22416 LoadSDNode *Ld = nullptr;
22417 int TokenFactorIndex = -1;
22418 SmallVector<SDValue, 8> Ops;
22419 SDNode* ChainVal = St->getChain().getNode();
22420 // Must be a store of a load. We currently handle two cases: the load
22421 // is a direct child, and it's under an intervening TokenFactor. It is
22422 // possible to dig deeper under nested TokenFactors.
22423 if (ChainVal == LdVal)
22424 Ld = cast<LoadSDNode>(St->getChain());
22425 else if (St->getValue().hasOneUse() &&
22426 ChainVal->getOpcode() == ISD::TokenFactor) {
22427 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
22428 if (ChainVal->getOperand(i).getNode() == LdVal) {
22429 TokenFactorIndex = i;
22430 Ld = cast<LoadSDNode>(St->getValue());
22432 Ops.push_back(ChainVal->getOperand(i));
22436 if (!Ld || !ISD::isNormalLoad(Ld))
22439 // If this is not the MMX case, i.e. we are just turning i64 load/store
22440 // into f64 load/store, avoid the transformation if there are multiple
22441 // uses of the loaded value.
22442 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
22447 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
22448 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
22450 if (Subtarget->is64Bit() || F64IsLegal) {
22451 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
22452 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
22453 Ld->getPointerInfo(), Ld->isVolatile(),
22454 Ld->isNonTemporal(), Ld->isInvariant(),
22455 Ld->getAlignment());
22456 SDValue NewChain = NewLd.getValue(1);
22457 if (TokenFactorIndex != -1) {
22458 Ops.push_back(NewChain);
22459 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
22461 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
22462 St->getPointerInfo(),
22463 St->isVolatile(), St->isNonTemporal(),
22464 St->getAlignment());
22467 // Otherwise, lower to two pairs of 32-bit loads / stores.
22468 SDValue LoAddr = Ld->getBasePtr();
22469 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
22470 DAG.getConstant(4, MVT::i32));
22472 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
22473 Ld->getPointerInfo(),
22474 Ld->isVolatile(), Ld->isNonTemporal(),
22475 Ld->isInvariant(), Ld->getAlignment());
22476 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
22477 Ld->getPointerInfo().getWithOffset(4),
22478 Ld->isVolatile(), Ld->isNonTemporal(),
22480 MinAlign(Ld->getAlignment(), 4));
22482 SDValue NewChain = LoLd.getValue(1);
22483 if (TokenFactorIndex != -1) {
22484 Ops.push_back(LoLd);
22485 Ops.push_back(HiLd);
22486 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
22489 LoAddr = St->getBasePtr();
22490 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
22491 DAG.getConstant(4, MVT::i32));
22493 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
22494 St->getPointerInfo(),
22495 St->isVolatile(), St->isNonTemporal(),
22496 St->getAlignment());
22497 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
22498 St->getPointerInfo().getWithOffset(4),
22500 St->isNonTemporal(),
22501 MinAlign(St->getAlignment(), 4));
22502 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
22507 /// Return 'true' if this vector operation is "horizontal"
22508 /// and return the operands for the horizontal operation in LHS and RHS. A
22509 /// horizontal operation performs the binary operation on successive elements
22510 /// of its first operand, then on successive elements of its second operand,
22511 /// returning the resulting values in a vector. For example, if
22512 /// A = < float a0, float a1, float a2, float a3 >
22514 /// B = < float b0, float b1, float b2, float b3 >
22515 /// then the result of doing a horizontal operation on A and B is
22516 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
22517 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
22518 /// A horizontal-op B, for some already available A and B, and if so then LHS is
22519 /// set to A, RHS to B, and the routine returns 'true'.
22520 /// Note that the binary operation should have the property that if one of the
22521 /// operands is UNDEF then the result is UNDEF.
22522 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
22523 // Look for the following pattern: if
22524 // A = < float a0, float a1, float a2, float a3 >
22525 // B = < float b0, float b1, float b2, float b3 >
22527 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
22528 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
22529 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
22530 // which is A horizontal-op B.
22532 // At least one of the operands should be a vector shuffle.
22533 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
22534 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
22537 MVT VT = LHS.getSimpleValueType();
22539 assert((VT.is128BitVector() || VT.is256BitVector()) &&
22540 "Unsupported vector type for horizontal add/sub");
22542 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
22543 // operate independently on 128-bit lanes.
22544 unsigned NumElts = VT.getVectorNumElements();
22545 unsigned NumLanes = VT.getSizeInBits()/128;
22546 unsigned NumLaneElts = NumElts / NumLanes;
22547 assert((NumLaneElts % 2 == 0) &&
22548 "Vector type should have an even number of elements in each lane");
22549 unsigned HalfLaneElts = NumLaneElts/2;
22551 // View LHS in the form
22552 // LHS = VECTOR_SHUFFLE A, B, LMask
22553 // If LHS is not a shuffle then pretend it is the shuffle
22554 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
22555 // NOTE: in what follows a default-initialized SDValue represents an UNDEF of the appropriate type.
22558 SmallVector<int, 16> LMask(NumElts);
22559 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
22560 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
22561 A = LHS.getOperand(0);
22562 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
22563 B = LHS.getOperand(1);
22564 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
22565 std::copy(Mask.begin(), Mask.end(), LMask.begin());
22567 if (LHS.getOpcode() != ISD::UNDEF)
22569 for (unsigned i = 0; i != NumElts; ++i)
22573 // Likewise, view RHS in the form
22574 // RHS = VECTOR_SHUFFLE C, D, RMask
22576 SmallVector<int, 16> RMask(NumElts);
22577 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
22578 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
22579 C = RHS.getOperand(0);
22580 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
22581 D = RHS.getOperand(1);
22582 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
22583 std::copy(Mask.begin(), Mask.end(), RMask.begin());
22585 if (RHS.getOpcode() != ISD::UNDEF)
22587 for (unsigned i = 0; i != NumElts; ++i)
22591 // Check that the shuffles are both shuffling the same vectors.
22592 if (!(A == C && B == D) && !(A == D && B == C))
22595 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
22596 if (!A.getNode() && !B.getNode())
22599 // If A and B occur in reverse order in RHS, then "swap" them (which means
22600 // rewriting the mask).
22602 CommuteVectorShuffleMask(RMask, NumElts);
22604 // At this point LHS and RHS are equivalent to
22605 // LHS = VECTOR_SHUFFLE A, B, LMask
22606 // RHS = VECTOR_SHUFFLE A, B, RMask
22607 // Check that the masks correspond to performing a horizontal operation.
22608 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
22609 for (unsigned i = 0; i != NumLaneElts; ++i) {
22610 int LIdx = LMask[i+l], RIdx = RMask[i+l];
22612 // Ignore any UNDEF components.
22613 if (LIdx < 0 || RIdx < 0 ||
22614 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
22615 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
22618 // Check that successive elements are being operated on. If not, this is
22619 // not a horizontal operation.
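// e.g. for v8f32 (two 128-bit lanes of four elements), the first lane of the
// result must be <a0 op a1, a2 op a3, b0 op b1, b2 op b3>, i.e. (LIdx, RIdx)
// pairs (0,1), (2,3), (8,9), (10,11).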
22620 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
22621 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
22622 if (!(LIdx == Index && RIdx == Index + 1) &&
22623 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
22628 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
22629 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
22633 /// Do target-specific dag combines on floating point adds.
22634 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
22635 const X86Subtarget *Subtarget) {
22636 EVT VT = N->getValueType(0);
22637 SDValue LHS = N->getOperand(0);
22638 SDValue RHS = N->getOperand(1);
22640 // Try to synthesize horizontal adds from adds of shuffles.
22641 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
22642 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
22643 isHorizontalBinOp(LHS, RHS, true))
22644 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
22648 /// Do target-specific dag combines on floating point subs.
22649 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
22650 const X86Subtarget *Subtarget) {
22651 EVT VT = N->getValueType(0);
22652 SDValue LHS = N->getOperand(0);
22653 SDValue RHS = N->getOperand(1);
22655 // Try to synthesize horizontal subs from subs of shuffles.
22656 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
22657 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
22658 isHorizontalBinOp(LHS, RHS, false))
22659 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
22663 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
22664 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
22665 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
22667 // F[X]OR(0.0, x) -> x
22668 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
22669 if (C->getValueAPF().isPosZero())
22670 return N->getOperand(1);
22672 // F[X]OR(x, 0.0) -> x
22673 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
22674 if (C->getValueAPF().isPosZero())
22675 return N->getOperand(0);
22679 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
22680 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
22681 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
22683 // Only perform optimizations if UnsafeMath is used.
22684 if (!DAG.getTarget().Options.UnsafeFPMath)
22687 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
22688 // into FMINC and FMAXC, which are commutative operations.
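// (MINPS/MAXPS return their second operand when either input is NaN or when
// comparing +0.0 with -0.0, so they are not commutative in general.)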
22689 unsigned NewOp = 0;
22690 switch (N->getOpcode()) {
22691 default: llvm_unreachable("unknown opcode");
22692 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
22693 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
22696 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
22697 N->getOperand(0), N->getOperand(1));
22700 /// Do target-specific dag combines on X86ISD::FAND nodes.
22701 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
22702 // FAND(0.0, x) -> 0.0
22703 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
22704 if (C->getValueAPF().isPosZero())
22705 return N->getOperand(0);
22707 // FAND(x, 0.0) -> 0.0
22708 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
22709 if (C->getValueAPF().isPosZero())
22710 return N->getOperand(1);
22715 /// Do target-specific dag combines on X86ISD::FANDN nodes
22716 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
22717 // FANDN(0.0, x) -> x
22718 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
22719 if (C->getValueAPF().isPosZero())
22720 return N->getOperand(1);
22722 // FANDN(x, 0.0) -> 0.0
22723 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
22724 if (C->getValueAPF().isPosZero())
22725 return N->getOperand(1);
22730 static SDValue PerformBTCombine(SDNode *N,
22732 TargetLowering::DAGCombinerInfo &DCI) {
22733 // BT ignores high bits in the bit index operand.
22734 SDValue Op1 = N->getOperand(1);
22735 if (Op1.hasOneUse()) {
22736 unsigned BitWidth = Op1.getValueSizeInBits();
22737 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
22738 APInt KnownZero, KnownOne;
22739 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
22740 !DCI.isBeforeLegalizeOps());
22741 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22742 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
22743 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
22744 DCI.CommitTargetLoweringOpt(TLO);
22749 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
22750 SDValue Op = N->getOperand(0);
22751 if (Op.getOpcode() == ISD::BITCAST)
22752 Op = Op.getOperand(0);
22753 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
22754 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
22755 VT.getVectorElementType().getSizeInBits() ==
22756 OpVT.getVectorElementType().getSizeInBits()) {
22757 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
22762 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
22763 const X86Subtarget *Subtarget) {
22764 EVT VT = N->getValueType(0);
22765 if (!VT.isVector())
22768 SDValue N0 = N->getOperand(0);
22769 SDValue N1 = N->getOperand(1);
22770 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
22773 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on
22774 // both SSE and AVX2, since there is no sign-extending shift-right
22775 // operation on a vector with 64-bit elements.
22776 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
22777 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
22778 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
22779 N0.getOpcode() == ISD::SIGN_EXTEND)) {
22780 SDValue N00 = N0.getOperand(0);
22782 // An EXTLOAD has a better lowering on AVX2:
22783 // it may be replaced with an X86ISD::VSEXT node.
22784 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
22785 if (!ISD::isNormalLoad(N00.getNode()))
22788 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
22789 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
22791 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
22797 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
22798 TargetLowering::DAGCombinerInfo &DCI,
22799 const X86Subtarget *Subtarget) {
22800 SDValue N0 = N->getOperand(0);
22801 EVT VT = N->getValueType(0);
22803 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
22804 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
22805 // This exposes the sext to the sdivrem lowering, so that it directly extends
22806 // from AH (which we otherwise need to do contortions to access).
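// e.g. for an i8 sdiv/srem pair the remainder is produced in AH;
// SDIVREM8_SEXT_HREG yields that remainder already sign-extended to the
// wider type, so no separate shift/extract of AH is needed.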
22807 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
22808 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
22810 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
22811 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
22812 N0.getOperand(0), N0.getOperand(1));
22813 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
22814 return R.getValue(1);
22817 if (!DCI.isBeforeLegalizeOps())
22820 if (!Subtarget->hasFp256())
22823 if (VT.isVector() && VT.getSizeInBits() == 256) {
22824 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
22832 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
22833 const X86Subtarget* Subtarget) {
22835 EVT VT = N->getValueType(0);
22837 // Let legalize expand this if it isn't a legal type yet.
22838 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
22841 EVT ScalarVT = VT.getScalarType();
22842 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
22843 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
22846 SDValue A = N->getOperand(0);
22847 SDValue B = N->getOperand(1);
22848 SDValue C = N->getOperand(2);
22850 bool NegA = (A.getOpcode() == ISD::FNEG);
22851 bool NegB = (B.getOpcode() == ISD::FNEG);
22852 bool NegC = (C.getOpcode() == ISD::FNEG);
22854 // Negative multiplication when NegA xor NegB
22855 bool NegMul = (NegA != NegB);
22857 A = A.getOperand(0);
22859 B = B.getOperand(0);
22861 C = C.getOperand(0);
22865 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
22867 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
22869 return DAG.getNode(Opcode, dl, VT, A, B, C);
22872 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
22873 TargetLowering::DAGCombinerInfo &DCI,
22874 const X86Subtarget *Subtarget) {
22875 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
22876 // (and (i32 x86isd::setcc_carry), 1)
22877 // This eliminates the zext. This transformation is necessary because
22878 // ISD::SETCC is always legalized to i8.
22880 SDValue N0 = N->getOperand(0);
22881 EVT VT = N->getValueType(0);
22883 if (N0.getOpcode() == ISD::AND &&
22885 N0.getOperand(0).hasOneUse()) {
22886 SDValue N00 = N0.getOperand(0);
22887 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
22888 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
22889 if (!C || C->getZExtValue() != 1)
22891 return DAG.getNode(ISD::AND, dl, VT,
22892 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
22893 N00.getOperand(0), N00.getOperand(1)),
22894 DAG.getConstant(1, VT));
22898 if (N0.getOpcode() == ISD::TRUNCATE &&
22900 N0.getOperand(0).hasOneUse()) {
22901 SDValue N00 = N0.getOperand(0);
22902 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
22903 return DAG.getNode(ISD::AND, dl, VT,
22904 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
22905 N00.getOperand(0), N00.getOperand(1)),
22906 DAG.getConstant(1, VT));
22909 if (VT.is256BitVector()) {
22910 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
22915 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
22916 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
22917 // This exposes the zext to the udivrem lowering, so that it directly extends
22918 // from AH (which we otherwise need to do contortions to access).
22919 if (N0.getOpcode() == ISD::UDIVREM &&
22920 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
22921 (VT == MVT::i32 || VT == MVT::i64)) {
22922 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
22923 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
22924 N0.getOperand(0), N0.getOperand(1));
22925 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
22926 return R.getValue(1);
22932 // Optimize x == -y --> x+y == 0
22933 // x != -y --> x+y != 0
22934 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
22935 const X86Subtarget* Subtarget) {
22936 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
22937 SDValue LHS = N->getOperand(0);
22938 SDValue RHS = N->getOperand(1);
22939 EVT VT = N->getValueType(0);
22942 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
22943 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
22944 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
22945 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
22946 LHS.getValueType(), RHS, LHS.getOperand(1));
22947 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
22948 addV, DAG.getConstant(0, addV.getValueType()), CC);
22950 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
22951 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
22952 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
22953 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
22954 RHS.getValueType(), LHS, RHS.getOperand(1));
22955 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
22956 addV, DAG.getConstant(0, addV.getValueType()), CC);
22959 if (VT.getScalarType() == MVT::i1) {
22960 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
22961 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
22962 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
22963 if (!IsSEXT0 && !IsVZero0)
22965 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
22966 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
22967 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
22969 if (!IsSEXT1 && !IsVZero1)
22972 if (IsSEXT0 && IsVZero1) {
22973 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
22974 if (CC == ISD::SETEQ)
22975 return DAG.getNOT(DL, LHS.getOperand(0), VT);
22976 return LHS.getOperand(0);
22978 if (IsSEXT1 && IsVZero0) {
22979 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
22980 if (CC == ISD::SETEQ)
22981 return DAG.getNOT(DL, RHS.getOperand(0), VT);
22982 return RHS.getOperand(0);
22989 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
22990 SelectionDAG &DAG) {
22992 MVT VT = Load->getSimpleValueType(0);
22993 MVT EVT = VT.getVectorElementType();
22994 SDValue Addr = Load->getOperand(1);
22995 SDValue NewAddr = DAG.getNode(
22996 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
22997 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
23000 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
23001 DAG.getMachineFunction().getMachineMemOperand(
23002 Load->getMemOperand(), 0, EVT.getStoreSize()));
23006 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
23007 const X86Subtarget *Subtarget) {
23009 MVT VT = N->getOperand(1)->getSimpleValueType(0);
23010 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
23011 "X86insertps is only defined for v4x32");
23013 SDValue Ld = N->getOperand(1);
23014 if (MayFoldLoad(Ld)) {
23015 // Extract the countS bits from the immediate so we can get the proper
23016 // address when narrowing the vector load to a specific element.
23017 // When the second source op is a memory address, insertps doesn't use
23018 // countS and just gets an f32 from that address.
23019 unsigned DestIndex =
23020 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
23021 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
23025 // Create this as a scalar to vector to match the instruction pattern.
23026 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
23027 // countS bits are ignored when loading from memory on insertps, which
23028 // means we don't need to explicitly set them to 0.
23029 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
23030 LoadScalarToVector, N->getOperand(2));
23033 // Helper function of PerformSETCCCombine. It materializes "setb reg"
23034 // as "sbb reg,reg", since it can be extended without zext and produces
23035 // an all-ones bit which is more useful than 0/1 in some cases.
23036 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
23039 return DAG.getNode(ISD::AND, DL, VT,
23040 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
23041 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
23042 DAG.getConstant(1, VT));
23043 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
23044 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
23045 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
23046 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
23049 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
23050 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
23051 TargetLowering::DAGCombinerInfo &DCI,
23052 const X86Subtarget *Subtarget) {
23054 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
23055 SDValue EFLAGS = N->getOperand(1);
23057 if (CC == X86::COND_A) {
23058 // Try to convert COND_A into COND_B in an attempt to facilitate
23059 // materializing "setb reg".
23061 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
23062 // cannot take an immediate as its first operand.
23064 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
23065 EFLAGS.getValueType().isInteger() &&
23066 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
23067 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
23068 EFLAGS.getNode()->getVTList(),
23069 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
23070 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
23071 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
23075 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
23076 // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
23078 if (CC == X86::COND_B)
23079 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
23083 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
23084 if (Flags.getNode()) {
23085 SDValue Cond = DAG.getConstant(CC, MVT::i8);
23086 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
23092 // Optimize branch condition evaluation.
23094 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
23095 TargetLowering::DAGCombinerInfo &DCI,
23096 const X86Subtarget *Subtarget) {
23098 SDValue Chain = N->getOperand(0);
23099 SDValue Dest = N->getOperand(1);
23100 SDValue EFLAGS = N->getOperand(3);
23101 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
23105 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
23106 if (Flags.getNode()) {
23107 SDValue Cond = DAG.getConstant(CC, MVT::i8);
23108 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
23115 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
23116 SelectionDAG &DAG) {
23117 // Take advantage of vector comparisons producing 0 or -1 in each lane to
23118 // optimize away an operation when it is applied to a constant.
23120 // The general transformation is:
23121 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
23122 // AND(VECTOR_CMP(x,y), constant2)
23123 // constant2 = UNARYOP(constant)
23125 // Early exit if this isn't a vector operation, the operand of the
23126 // unary operation isn't a bitwise AND, or if the sizes of the operations
23127 // aren't the same.
23128 EVT VT = N->getValueType(0);
23129 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
23130 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
23131 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
23134 // Now check that the other operand of the AND is a constant. We could
23135 // make the transformation for non-constant splats as well, but it's unclear
23136 // that would be a benefit as it would not eliminate any operations, just
23137 // perform one more step in scalar code before moving to the vector unit.
23138 if (BuildVectorSDNode *BV =
23139 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
23140 // Bail out if the vector isn't a constant.
23141 if (!BV->isConstant())
23144 // Everything checks out. Build up the new and improved node.
23146 EVT IntVT = BV->getValueType(0);
23147 // Create a new constant of the appropriate type for the transformed
23149 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
23150 // The AND node needs bitcasts to/from an integer vector type around it.
23151 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
23152 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
23153 N->getOperand(0)->getOperand(0), MaskConst);
23154 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
23161 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
23162 const X86Subtarget *Subtarget) {
23163 // First try to optimize away the conversion entirely when it's
23164 // conditionally from a constant. Vectors only.
23165 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
23166 if (Res != SDValue())
23169 // Now move on to more general possibilities.
23170 SDValue Op0 = N->getOperand(0);
23171 EVT InVT = Op0->getValueType(0);
23173 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
23174 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
23176 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
23177 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
23178 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
23181 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
23182 // a 32-bit target where SSE doesn't support i64->FP operations.
23183 if (Op0.getOpcode() == ISD::LOAD) {
23184 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
23185 EVT VT = Ld->getValueType(0);
23186 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
23187 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
23188 !Subtarget->is64Bit() && VT == MVT::i64) {
23189 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
23190 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
23191 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
23198 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
23199 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
23200 X86TargetLowering::DAGCombinerInfo &DCI) {
23201 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
23202 // the result is either zero or one (depending on the input carry bit).
23203 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
23204 if (X86::isZeroNode(N->getOperand(0)) &&
23205 X86::isZeroNode(N->getOperand(1)) &&
23206 // We don't have a good way to replace an EFLAGS use, so only do this when
      // the EFLAGS result (value #1) has no uses.
23208 SDValue(N, 1).use_empty()) {
      SDLoc DL(N);
23210 EVT VT = N->getValueType(0);
23211 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
23212 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
23213 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
23214 DAG.getConstant(X86::COND_B, MVT::i8),
      N->getOperand(2)),
23216 DAG.getConstant(1, VT));
23217 return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}
23223 // fold (add Y, (sete X, 0)) -> adc 0, Y
23224 // (add Y, (setne X, 0)) -> sbb -1, Y
23225 // (sub (sete X, 0), Y) -> sbb 0, Y
23226 // (sub (setne X, 0), Y) -> adc -1, Y
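// These folds work because the code below rewrites (cmp X, 0) as (cmp X, 1):
// the carry flag is then set exactly when X == 0 (unsigned X < 1), so ADC/SBB
// can fold the boolean directly into the add/sub as a carry or borrow.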
23227 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
23230 // Look through ZExts.
23231 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
23232 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();
23235 SDValue SetCC = Ext.getOperand(0);
23236 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();
23239 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
23240 if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();
23243 SDValue Cmp = SetCC.getOperand(1);
23244 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
23245 !X86::isZeroNode(Cmp.getOperand(1)) ||
23246 !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();
23249 SDValue CmpOp0 = Cmp.getOperand(0);
23250 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
23251 DAG.getConstant(1, CmpOp0.getValueType()));
23253 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
23254 if (CC == X86::COND_NE)
23255 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
23256 DL, OtherVal.getValueType(), OtherVal,
23257 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
23258 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
23259 DL, OtherVal.getValueType(), OtherVal,
23260 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
23263 /// PerformAddCombine - Do target-specific dag combines on integer adds.
23264 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
23265 const X86Subtarget *Subtarget) {
23266 EVT VT = N->getValueType(0);
23267 SDValue Op0 = N->getOperand(0);
23268 SDValue Op1 = N->getOperand(1);
23270 // Try to synthesize horizontal adds from adds of shuffles.
23271 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
23272 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
23273 isHorizontalBinOp(Op0, Op1, true))
23274 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
23276 return OptimizeConditionalInDecrement(N, DAG);
23279 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
23280 const X86Subtarget *Subtarget) {
23281 SDValue Op0 = N->getOperand(0);
23282 SDValue Op1 = N->getOperand(1);
23284 // X86 can't encode an immediate LHS of a sub. See if we can push the
23285 // negation into a preceding instruction.
23286 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
23287 // If the RHS of the sub is a XOR with one use and a constant, invert the
23288 // immediate. Then add one to the LHS of the sub so we can turn
23289 // X-Y -> X+~Y+1, saving one register.
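    // For example (illustrative): (sub 5, (xor X, 3)) becomes
    // (add (xor X, ~3), 6), using the identity C - (X ^ K) == (X ^ ~K) + (C + 1).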
23290 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
23291 isa<ConstantSDNode>(Op1.getOperand(1))) {
23292 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
23293 EVT VT = Op0.getValueType();
23294 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
23296 DAG.getConstant(~XorC, VT));
23297 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
23298 DAG.getConstant(C->getAPIntValue()+1, VT));
23302 // Try to synthesize horizontal subs from subs of shuffles.
23303 EVT VT = N->getValueType(0);
23304 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
23305 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
23306 isHorizontalBinOp(Op0, Op1, true))
23307 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
23309 return OptimizeConditionalInDecrement(N, DAG);
23312 /// performVZEXTCombine - Do target-specific combines on X86ISD::VZEXT nodes.
23313 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
23314 TargetLowering::DAGCombinerInfo &DCI,
23315 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
23317 MVT VT = N->getSimpleValueType(0);
23318 SDValue Op = N->getOperand(0);
23319 MVT OpVT = Op.getSimpleValueType();
23320 MVT OpEltVT = OpVT.getVectorElementType();
23321 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
23323 // (vzext (bitcast (vzext x))) -> (vzext x)
  SDValue V = Op;
23325 while (V.getOpcode() == ISD::BITCAST)
23326 V = V.getOperand(0);
23328 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
23329 MVT InnerVT = V.getSimpleValueType();
23330 MVT InnerEltVT = InnerVT.getVectorElementType();
23332 // If the element sizes match exactly, we can just do one larger vzext. This
23333 // is always an exact type match as vzext operates on integer types.
23334 if (OpEltVT == InnerEltVT) {
23335 assert(OpVT == InnerVT && "Types must match for vzext!");
23336 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
23339 // The only other way we can combine them is if only a single element of the
23340 // inner vzext is used in the input to the outer vzext.
23341 if (InnerEltVT.getSizeInBits() < InputBits)
    return SDValue();
23344 // In this case, the inner vzext is completely dead because we're going to
23345 // only look at bits inside of the low element. Just do the outer vzext on
23346 // a bitcast of the input to the inner.
23347 return DAG.getNode(X86ISD::VZEXT, DL, VT,
23348 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
23351 // Check if we can bypass extracting and re-inserting an element of an input
23352 // vector. Essentially:
23353 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
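  // This is safe because the outer vzext only reads InputBits bits of its
  // input, and those are exactly the bits of element 0 that was extracted
  // from the original vector, so the extract/re-insert pair is redundant.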
23354 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
23355 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
23356 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
23357 SDValue ExtractedV = V.getOperand(0);
23358 SDValue OrigV = ExtractedV.getOperand(0);
23359 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
23360 if (ExtractIdx->getZExtValue() == 0) {
23361 MVT OrigVT = OrigV.getSimpleValueType();
23362 // Extract a subvector if necessary...
23363 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
23364 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
23365 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
23366 OrigVT.getVectorNumElements() / Ratio);
23367 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
23368 DAG.getIntPtrConstant(0));
23370 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
23371 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
23378 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
23379 DAGCombinerInfo &DCI) const {
23380 SelectionDAG &DAG = DCI.DAG;
23381 switch (N->getOpcode()) {
23383 case ISD::EXTRACT_VECTOR_ELT:
23384 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
23387 case X86ISD::SHRUNKBLEND:
23388 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
23389 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
23390 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
23391 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
23392 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
23393 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
23394 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
23397 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
23398 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
23399 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
23400 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
23401 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
23402 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
23403 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
23404 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
23405 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
23406 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
23407 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
23409 case X86ISD::FOR: return PerformFORCombine(N, DAG);
23411 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
23412 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
23413 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
23414 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
23415 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
23416 case ISD::ANY_EXTEND:
23417 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
23418 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
23419 case ISD::SIGN_EXTEND_INREG:
23420 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
23421 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
23422 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
23423 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
23424 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
23425 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
23426 case X86ISD::SHUFP: // Handle all target specific shuffles
23427 case X86ISD::PALIGNR:
23428 case X86ISD::UNPCKH:
23429 case X86ISD::UNPCKL:
23430 case X86ISD::MOVHLPS:
23431 case X86ISD::MOVLHPS:
23432 case X86ISD::PSHUFB:
23433 case X86ISD::PSHUFD:
23434 case X86ISD::PSHUFHW:
23435 case X86ISD::PSHUFLW:
23436 case X86ISD::MOVSS:
23437 case X86ISD::MOVSD:
23438 case X86ISD::VPERMILPI:
23439 case X86ISD::VPERM2X128:
23440 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
23441 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
23442 case ISD::INTRINSIC_WO_CHAIN:
23443 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
23444 case X86ISD::INSERTPS: {
23445 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
23446 return PerformINSERTPSCombine(N, DAG, Subtarget);
    break;
  }
23449 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
  }

  return SDValue();
}
23455 /// isTypeDesirableForOp - Return true if the target has native support for
23456 /// the specified value type and it is 'desirable' to use the type for the
23457 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
23458 /// instruction encodings are longer and some i16 instructions are slow.
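/// For example, "addw $1, %ax" needs a 0x66 operand-size prefix that
/// "addl $1, %eax" does not, and writes to 16-bit registers can cause
/// partial-register stalls on some microarchitectures.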
23459 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
23460 if (!isTypeLegal(VT))
    return false;
23462 if (VT != MVT::i16)
    return true;
23469 case ISD::SIGN_EXTEND:
23470 case ISD::ZERO_EXTEND:
23471 case ISD::ANY_EXTEND:
23484 /// IsDesirableToPromoteOp - This method queries the target whether it is
23485 /// beneficial for dag combiner to promote the specified node. If true, it
23486 /// should return the desired promotion type by reference.
23487 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
23488 EVT VT = Op.getValueType();
23489 if (VT != MVT::i16)
    return false;
23492 bool Promote = false;
23493 bool Commute = false;
23494 switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
23497 LoadSDNode *LD = cast<LoadSDNode>(Op);
23498 // If the non-extending load has a single use and it's not live out, then it
23499 // might be folded.
23500 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
23501 Op.hasOneUse()*/) {
23502 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
23503 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
23504 // The only case where we'd want to promote LOAD (rather than it being
23505 // promoted as an operand) is when its only use is a live-out copy.
23506 if (UI->getOpcode() != ISD::CopyToReg)
      return false;
23513 case ISD::SIGN_EXTEND:
23514 case ISD::ZERO_EXTEND:
23515 case ISD::ANY_EXTEND:
23520 SDValue N0 = Op.getOperand(0);
23521 // Look out for (store (shl (load), x)).
23522 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
23535 SDValue N0 = Op.getOperand(0);
23536 SDValue N1 = Op.getOperand(1);
23537 if (!Commute && MayFoldLoad(N1))
      return false;
23539 // Avoid disabling potential load folding opportunities.
23540 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
23542 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
23552 //===----------------------------------------------------------------------===//
23553 // X86 Inline Assembly Support
23554 //===----------------------------------------------------------------------===//
23557 // Helper to match an asm string against a sequence of pieces separated by whitespace.
23558 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
23559 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
23561 for (unsigned i = 0, e = args.size(); i != e; ++i) {
23562 StringRef piece(*args[i]);
23563 if (!s.startswith(piece)) // Check if the piece matches.
      return false;
23566 s = s.substr(piece.size());
23567 StringRef::size_type pos = s.find_first_not_of(" \t");
23568 if (pos == 0) // We matched a prefix.
      return false;
23576 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
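// Usage (illustrative): matchAsm(AsmPieces[0], "bswap", "$0") returns true
// when the piece consists of the token "bswap" followed by "$0", separated
// by whitespace.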
23579 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
23581 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
23582 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
23583 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
23584 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
23586 if (AsmPieces.size() == 3)
        return true;
23588 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}
23595 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
23596 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
23598 std::string AsmStr = IA->getAsmString();
23600 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
23601 if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;
23604 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
23605 SmallVector<StringRef, 4> AsmPieces;
23606 SplitString(AsmStr, AsmPieces, ";\n");
23608 switch (AsmPieces.size()) {
23609 default: return false;
  case 1:
23611 // FIXME: this should verify that we are targeting a 486 or better. If not,
23612 // we will turn this bswap into something that will be lowered to logical
23613 // ops instead of emitting the bswap asm. For now, we don't support 486 or
23614 // lower so don't worry about this.
23616 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
23617 matchAsm(AsmPieces[0], "bswapl", "$0") ||
23618 matchAsm(AsmPieces[0], "bswapq", "$0") ||
23619 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
23620 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
23621 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
23622 // No need to check constraints, nothing other than the equivalent of
23623 // "=r,0" would be valid here.
23624 return IntrinsicLowering::LowerToByteSwap(CI);
23627 // rorw $$8, ${0:w} --> llvm.bswap.i16
23628 if (CI->getType()->isIntegerTy(16) &&
23629 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
23630 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
23631 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
23633 const std::string &ConstraintsStr = IA->getConstraintString();
23634 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
23635 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
23636 if (clobbersFlagRegisters(AsmPieces))
23637 return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
23641 if (CI->getType()->isIntegerTy(32) &&
23642 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
23643 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
23644 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
23645 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
23647 const std::string &ConstraintsStr = IA->getConstraintString();
23648 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
23649 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
23650 if (clobbersFlagRegisters(AsmPieces))
23651 return IntrinsicLowering::LowerToByteSwap(CI);
23654 if (CI->getType()->isIntegerTy(64)) {
23655 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
23656 if (Constraints.size() >= 2 &&
23657 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
23658 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
23659 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
23660 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
23661 matchAsm(AsmPieces[1], "bswap", "%edx") &&
23662 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
23663 return IntrinsicLowering::LowerToByteSwap(CI);
23671 /// getConstraintType - Given a constraint letter, return the type of
23672 /// constraint it is for this target.
23673 X86TargetLowering::ConstraintType
23674 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
23675 if (Constraint.size() == 1) {
23676 switch (Constraint[0]) {
23687 return C_RegisterClass;
23711 return TargetLowering::getConstraintType(Constraint);
23714 /// Examine constraint type and operand type and determine a weight value.
23715 /// This object must already have been set up with the operand type
23716 /// and the current alternative constraint selected.
23717 TargetLowering::ConstraintWeight
23718 X86TargetLowering::getSingleConstraintMatchWeight(
23719 AsmOperandInfo &info, const char *constraint) const {
23720 ConstraintWeight weight = CW_Invalid;
23721 Value *CallOperandVal = info.CallOperandVal;
23722 // If we don't have a value, we can't do a match,
23723 // but allow it at the lowest weight.
23724 if (!CallOperandVal)
    return CW_Default;
23726 Type *type = CallOperandVal->getType();
23727 // Look at the constraint type.
23728 switch (*constraint) {
23730 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
23741 if (CallOperandVal->getType()->isIntegerTy())
23742 weight = CW_SpecificReg;
23747 if (type->isFloatingPointTy())
23748 weight = CW_SpecificReg;
23751 if (type->isX86_MMXTy() && Subtarget->hasMMX())
23752 weight = CW_SpecificReg;
23756 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
23757 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
23758 weight = CW_Register;
23761 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
23762 if (C->getZExtValue() <= 31)
23763 weight = CW_Constant;
23767 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23768 if (C->getZExtValue() <= 63)
23769 weight = CW_Constant;
23773 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23774 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
23775 weight = CW_Constant;
23779 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23780 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
23781 weight = CW_Constant;
23785 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23786 if (C->getZExtValue() <= 3)
23787 weight = CW_Constant;
23791 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23792 if (C->getZExtValue() <= 0xff)
23793 weight = CW_Constant;
23798 if (dyn_cast<ConstantFP>(CallOperandVal)) {
23799 weight = CW_Constant;
23803 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23804 if ((C->getSExtValue() >= -0x80000000LL) &&
23805 (C->getSExtValue() <= 0x7fffffffLL))
23806 weight = CW_Constant;
23810 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
23811 if (C->getZExtValue() <= 0xffffffff)
23812 weight = CW_Constant;
23819 /// LowerXConstraint - try to replace an X constraint, which matches anything,
23820 /// with another that has more specific requirements based on the type of the
23821 /// corresponding operand.
23822 const char *X86TargetLowering::
23823 LowerXConstraint(EVT ConstraintVT) const {
23824 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
23825 // 'f' like normal targets.
23826 if (ConstraintVT.isFloatingPoint()) {
23827 if (Subtarget->hasSSE2())
      return "Y";
23829 if (Subtarget->hasSSE1())
      return "x";
  }
23833 return TargetLowering::LowerXConstraint(ConstraintVT);
23836 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
23837 /// vector. If it is invalid, don't add anything to Ops.
23838 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
23839 std::string &Constraint,
23840 std::vector<SDValue>&Ops,
23841 SelectionDAG &DAG) const {
  SDValue Result;
23844 // Only support length 1 constraints for now.
23845 if (Constraint.length() > 1) return;
23847 char ConstraintLetter = Constraint[0];
23848 switch (ConstraintLetter) {
23851 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23852 if (C->getZExtValue() <= 31) {
23853 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23859 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23860 if (C->getZExtValue() <= 63) {
23861 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23867 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23868 if (isInt<8>(C->getSExtValue())) {
23869 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23875 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23876 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
23877 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
23878 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
23884 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23885 if (C->getZExtValue() <= 3) {
23886 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23892 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23893 if (C->getZExtValue() <= 255) {
23894 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23900 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23901 if (C->getZExtValue() <= 127) {
23902 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23908 // 32-bit signed value
23909 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23910 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
23911 C->getSExtValue())) {
23912 // Widen to 64 bits here to get it sign extended.
23913 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
23916 // FIXME gcc accepts some relocatable values here too, but only in certain
23917 // memory models; it's complicated.
23922 // 32-bit unsigned value
23923 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
23924 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
23925 C->getZExtValue())) {
23926 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
23930 // FIXME gcc accepts some relocatable values here too, but only in certain
23931 // memory models; it's complicated.
23935 // Literal immediates are always ok.
23936 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
23937 // Widen to 64 bits here to get it sign extended.
23938 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
23942 // In any sort of PIC mode addresses need to be computed at runtime by
23943 // adding in a register or some sort of table lookup. These can't
23944 // be used as immediates.
23945 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
23948 // If we are in non-pic codegen mode, we allow the address of a global (with
23949 // an optional displacement) to be used with 'i'.
23950 GlobalAddressSDNode *GA = nullptr;
23951 int64_t Offset = 0;
23953 // Match either (GA), (GA+C), (GA+C1+C2), etc.
23955 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
23956 Offset += GA->getOffset();
23958 } else if (Op.getOpcode() == ISD::ADD) {
23959 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
23960 Offset += C->getZExtValue();
23961 Op = Op.getOperand(0);
23964 } else if (Op.getOpcode() == ISD::SUB) {
23965 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
23966 Offset += -C->getZExtValue();
23967 Op = Op.getOperand(0);
23972 // Otherwise, this isn't something we can handle, reject it.
23976 const GlobalValue *GV = GA->getGlobal();
23977 // If we require an extra load to get this address, as in PIC mode, we
23978 // can't accept it.
23979 if (isGlobalStubReference(
23980 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
    return;
23983 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
23984 GA->getValueType(0), Offset);
23989 if (Result.getNode()) {
23990 Ops.push_back(Result);
    return;
  }
23993 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
23996 std::pair<unsigned, const TargetRegisterClass*>
23997 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
23999 // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
24001 if (Constraint.size() == 1) {
24002 // GCC Constraint Letters
24003 switch (Constraint[0]) {
24005 // TODO: Slight differences here in allocation order and leaving
24006 // RIP in the class. Do they matter any more here than they do
24007 // in the normal allocation?
24008 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
24009 if (Subtarget->is64Bit()) {
24010 if (VT == MVT::i32 || VT == MVT::f32)
24011 return std::make_pair(0U, &X86::GR32RegClass);
24012 if (VT == MVT::i16)
24013 return std::make_pair(0U, &X86::GR16RegClass);
24014 if (VT == MVT::i8 || VT == MVT::i1)
24015 return std::make_pair(0U, &X86::GR8RegClass);
24016 if (VT == MVT::i64 || VT == MVT::f64)
24017 return std::make_pair(0U, &X86::GR64RegClass);
24020 // 32-bit fallthrough
24021 case 'Q': // Q_REGS
24022 if (VT == MVT::i32 || VT == MVT::f32)
24023 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
24024 if (VT == MVT::i16)
24025 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
24026 if (VT == MVT::i8 || VT == MVT::i1)
24027 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
24028 if (VT == MVT::i64)
24029 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
24031 case 'r': // GENERAL_REGS
24032 case 'l': // INDEX_REGS
24033 if (VT == MVT::i8 || VT == MVT::i1)
24034 return std::make_pair(0U, &X86::GR8RegClass);
24035 if (VT == MVT::i16)
24036 return std::make_pair(0U, &X86::GR16RegClass);
24037 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
24038 return std::make_pair(0U, &X86::GR32RegClass);
24039 return std::make_pair(0U, &X86::GR64RegClass);
24040 case 'R': // LEGACY_REGS
24041 if (VT == MVT::i8 || VT == MVT::i1)
24042 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
24043 if (VT == MVT::i16)
24044 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
24045 if (VT == MVT::i32 || !Subtarget->is64Bit())
24046 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
24047 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
24048 case 'f': // FP Stack registers.
24049 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
24050 // value to the correct fpstack register class.
24051 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
24052 return std::make_pair(0U, &X86::RFP32RegClass);
24053 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
24054 return std::make_pair(0U, &X86::RFP64RegClass);
24055 return std::make_pair(0U, &X86::RFP80RegClass);
24056 case 'y': // MMX_REGS if MMX allowed.
24057 if (!Subtarget->hasMMX()) break;
24058 return std::make_pair(0U, &X86::VR64RegClass);
24059 case 'Y': // SSE_REGS if SSE2 allowed
24060 if (!Subtarget->hasSSE2()) break;
24062 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
24063 if (!Subtarget->hasSSE1()) break;
24065 switch (VT.SimpleTy) {
24067 // Scalar SSE types.
24070 return std::make_pair(0U, &X86::FR32RegClass);
24073 return std::make_pair(0U, &X86::FR64RegClass);
24081 return std::make_pair(0U, &X86::VR128RegClass);
24089 return std::make_pair(0U, &X86::VR256RegClass);
24094 return std::make_pair(0U, &X86::VR512RegClass);
24100 // Use the default implementation in TargetLowering to convert the register
24101 // constraint into a member of a register class.
24102 std::pair<unsigned, const TargetRegisterClass*> Res;
24103 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
24105 // Not found as a standard register?
  if (!Res.second) {
24107 // Map "st(0)" through "st(7)" to the corresponding FP-stack register.
24108 if (Constraint.size() == 7 && Constraint[0] == '{' &&
24109 tolower(Constraint[1]) == 's' &&
24110 tolower(Constraint[2]) == 't' &&
24111 Constraint[3] == '(' &&
24112 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
24113 Constraint[5] == ')' &&
24114 Constraint[6] == '}') {
24116 Res.first = X86::FP0+Constraint[4]-'0';
24117 Res.second = &X86::RFP80RegClass;
      return Res;
    }
24121 // GCC allows "st(0)" to be called just plain "st".
24122 if (StringRef("{st}").equals_lower(Constraint)) {
24123 Res.first = X86::FP0;
24124 Res.second = &X86::RFP80RegClass;
      return Res;
    }
24129 if (StringRef("{flags}").equals_lower(Constraint)) {
24130 Res.first = X86::EFLAGS;
24131 Res.second = &X86::CCRRegClass;
      return Res;
    }
24135 // 'A' means EAX + EDX.
24136 if (Constraint == "A") {
24137 Res.first = X86::EAX;
24138 Res.second = &X86::GR32_ADRegClass;
      return Res;
    }

    // None of the special names matched; return the default lookup result.
    return Res;
  }
24144 // Otherwise, check to see if this is a register class of the wrong value
24145 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
24146 // turn into {ax},{dx}.
24147 if (Res.second->hasType(VT))
24148 return Res; // Correct type already, nothing to do.
24150 // All of the single-register GCC register classes map their values onto
24151 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
24152 // really want an 8-bit or 32-bit register, map to the appropriate register
24153 // class and return the appropriate register.
24154 if (Res.second == &X86::GR16RegClass) {
24155 if (VT == MVT::i8 || VT == MVT::i1) {
24156 unsigned DestReg = 0;
24157 switch (Res.first) {
24159 case X86::AX: DestReg = X86::AL; break;
24160 case X86::DX: DestReg = X86::DL; break;
24161 case X86::CX: DestReg = X86::CL; break;
24162 case X86::BX: DestReg = X86::BL; break;
24165 Res.first = DestReg;
24166 Res.second = &X86::GR8RegClass;
24168 } else if (VT == MVT::i32 || VT == MVT::f32) {
24169 unsigned DestReg = 0;
24170 switch (Res.first) {
24172 case X86::AX: DestReg = X86::EAX; break;
24173 case X86::DX: DestReg = X86::EDX; break;
24174 case X86::CX: DestReg = X86::ECX; break;
24175 case X86::BX: DestReg = X86::EBX; break;
24176 case X86::SI: DestReg = X86::ESI; break;
24177 case X86::DI: DestReg = X86::EDI; break;
24178 case X86::BP: DestReg = X86::EBP; break;
24179 case X86::SP: DestReg = X86::ESP; break;
24182 Res.first = DestReg;
24183 Res.second = &X86::GR32RegClass;
24185 } else if (VT == MVT::i64 || VT == MVT::f64) {
24186 unsigned DestReg = 0;
24187 switch (Res.first) {
24189 case X86::AX: DestReg = X86::RAX; break;
24190 case X86::DX: DestReg = X86::RDX; break;
24191 case X86::CX: DestReg = X86::RCX; break;
24192 case X86::BX: DestReg = X86::RBX; break;
24193 case X86::SI: DestReg = X86::RSI; break;
24194 case X86::DI: DestReg = X86::RDI; break;
24195 case X86::BP: DestReg = X86::RBP; break;
24196 case X86::SP: DestReg = X86::RSP; break;
24199 Res.first = DestReg;
24200 Res.second = &X86::GR64RegClass;
24203 } else if (Res.second == &X86::FR32RegClass ||
24204 Res.second == &X86::FR64RegClass ||
24205 Res.second == &X86::VR128RegClass ||
24206 Res.second == &X86::VR256RegClass ||
24207 Res.second == &X86::FR32XRegClass ||
24208 Res.second == &X86::FR64XRegClass ||
24209 Res.second == &X86::VR128XRegClass ||
24210 Res.second == &X86::VR256XRegClass ||
24211 Res.second == &X86::VR512RegClass) {
24212 // Handle references to XMM physical registers that got mapped into the
24213 // wrong class. This can happen with constraints like {xmm0} where the
24214 // target independent register mapper will just pick the first match it can
24215 // find, ignoring the required type.
24217 if (VT == MVT::f32 || VT == MVT::i32)
24218 Res.second = &X86::FR32RegClass;
24219 else if (VT == MVT::f64 || VT == MVT::i64)
24220 Res.second = &X86::FR64RegClass;
24221 else if (X86::VR128RegClass.hasType(VT))
24222 Res.second = &X86::VR128RegClass;
24223 else if (X86::VR256RegClass.hasType(VT))
24224 Res.second = &X86::VR256RegClass;
24225 else if (X86::VR512RegClass.hasType(VT))
24226 Res.second = &X86::VR512RegClass;
24232 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
24234 // Scaling factors are not free at all.
24235 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
24236 // will take 2 allocations in the out of order engine instead of 1
24237 // for plain addressing mode, i.e. inst (reg1).
24239 // vaddps (%rsi,%rdx), %ymm0, %ymm1
24240 // Requires two allocations (one for the load, one for the computation)
24242 // vaddps (%rsi), %ymm0, %ymm1
24243 // Requires just 1 allocation, i.e., freeing allocations for other operations
24244 // and having less micro operations to execute.
24246 // For some X86 architectures, this is even worse because for instance for
24247 // stores, the complex addressing mode forces the instruction to use the
24248 // "load" ports instead of the dedicated "store" port.
24249 // E.g., on Haswell:
24250 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
24251 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
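// Concretely, for a legal addressing mode the check below reports a cost of
// 1 whenever an index register is used (AM.Scale != 0), and 0 for a plain
// base-register mode.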
24252 if (isLegalAddressingMode(AM, Ty))
24253 // Scale represents reg2 * scale, thus account for 1
24254 // as soon as we use a second register.
24255 return AM.Scale != 0;
24259 bool X86TargetLowering::isTargetFTOL() const {
24260 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();