//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
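  // (SSE/AVX compares such as pcmpeqd produce all-ones or all-zeros lanes,
  // which is exactly what ZeroOrNegativeOneBooleanContent describes.)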
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
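  // addBypassSlowDiv(N, M) emits a run-time check: when both operands of an
  // N-bit divide fit in M bits, the cheaper M-bit divide instruction is used
  // instead of the slow full-width one.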
  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }
  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
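  // (After ucomiss/ucomisd, "equal" sets ZF but "unordered" sets ZF, PF and
  // CF, so an ordered-equal test needs ZF==1 and PF==0, i.e. two setcc's.)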
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512())
      // f32/f64 are legal, f80 is custom.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!Subtarget->useSoftFloat()) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!Subtarget->useSoftFloat()) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (!Subtarget->useSoftFloat()) {
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    if (X86ScalarSSEf32) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
      // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    } else {
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    }
  } else if (!Subtarget->useSoftFloat()) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32; f80 is custom.
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::f128, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  if (Subtarget->is32Bit() && Subtarget->isTargetKnownWindowsMSVC()) {
    // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)` is.
    // We should promote the value to 64 bits to solve this. This is what
    // the CRT headers do - `fmodf` is an inline header function casting to
    // f64 and calling `fmod`.
    setOperationAction(ISD::FREM, MVT::f32, Promote);
  } else {
    setOperationAction(ISD::FREM, MVT::f32, Expand);
  }

  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
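  // (FLT_ROUNDS_ has no direct instruction; the custom lowering reads the
  // current rounding mode out of the x87 control word stored to memory.)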
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }
  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
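  // The net effect is that f16 behaves as a storage-only type: every use is
  // extended to f32 first and every store truncates back down.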
  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::SETCCE, MVT::i8, Custom);
  setOperationAction(ISD::SETCCE, MVT::i16, Custom);
  setOperationAction(ISD::SETCCE, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
    setOperationAction(ISD::SETCCE, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a light-weight setjmp/longjmp replacement used to
  // support continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented, and please don't
  // build your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
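  // (CMPXCHG sets ZF on success, so the "with success" form maps naturally
  // onto x86's compare-and-exchange without an extra comparison.)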
  // FIXME - use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
  if (!Subtarget->useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!Subtarget->useSoftFloat() && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!Subtarget->useSoftFloat()) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // Long double always uses X87, except f128 in MMX.
  if (!Subtarget->useSoftFloat()) {
    if (Subtarget->is64Bit() && Subtarget->hasMMX()) {
      addRegisterClass(MVT::f128, &X86::FR128RegClass);
      ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
    }

    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget->useSoftFloat() && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  for (MVT MMXTy : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64}) {
    setOperationAction(ISD::MULHS, MMXTy, Expand);
    setOperationAction(ISD::AND, MMXTy, Expand);
    setOperationAction(ISD::OR, MMXTy, Expand);
    setOperationAction(ISD::XOR, MMXTy, Expand);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MMXTy, Expand);
    setOperationAction(ISD::SELECT, MMXTy, Expand);
    setOperationAction(ISD::BITCAST, MMXTy, Expand);
  }
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }
  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v16i8, Legal);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CTPOP, MVT::v16i8, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
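    // (Vector CTPOP has no dedicated instruction here; the custom lowering
    // emits in-register bit tricks, e.g. a PSHUFB nibble lookup when SSSE3
    // is available.)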
    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    // ISD::CTTZ v2i64 - scalarization is faster.
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    // ISD::CTTZ_ZERO_UNDEF v2i64 - scalarization is faster.
    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For
    // sext loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }
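    // Promotion canonicalizes all 128-bit bitwise ops and loads onto one
    // type, so a single pand/por/pxor/movdqa pattern covers every element
    // width.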
    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }
  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
    }
    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }
    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);
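    // (SSE2 only shifts whole vectors by a uniform amount; per-element
    // variable shifts don't arrive until AVX2's vpsllvd/vpsrlvd/vpsravd, so
    // these cases are emulated in the custom lowering.)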
    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v2i64, Custom);
    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
  if (Subtarget->hasXOP()) {
    setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
    setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v2i64, Custom);
    setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
    setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v4i64, Custom);
  }
  if (!Subtarget->useSoftFloat() && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    setOperationAction(ISD::CTPOP, MVT::v32i8, Custom);
    setOperationAction(ISD::CTPOP, MVT::v16i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v32i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v16i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i64, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v32i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
    if (Subtarget->hasAnyFMA()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      setOperationAction(ISD::MUL, MVT::v32i8, Custom);

      setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
      setOperationAction(ISD::MULHS, MVT::v16i16, Legal);

      setOperationAction(ISD::SMAX, MVT::v32i8, Legal);
      setOperationAction(ISD::SMAX, MVT::v16i16, Legal);
      setOperationAction(ISD::SMAX, MVT::v8i32, Legal);
      setOperationAction(ISD::UMAX, MVT::v32i8, Legal);
      setOperationAction(ISD::UMAX, MVT::v16i16, Legal);
      setOperationAction(ISD::UMAX, MVT::v8i32, Legal);
      setOperationAction(ISD::SMIN, MVT::v32i8, Legal);
      setOperationAction(ISD::SMIN, MVT::v16i16, Legal);
      setOperationAction(ISD::SMIN, MVT::v8i32, Legal);
      setOperationAction(ISD::UMIN, MVT::v32i8, Legal);
      setOperationAction(ISD::UMIN, MVT::v16i16, Legal);
      setOperationAction(ISD::UMIN, MVT::v8i32, Legal);

      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);

      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X.
      setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);

      setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      setOperationAction(ISD::MUL, MVT::v32i8, Custom);

      setOperationAction(ISD::SMAX, MVT::v32i8, Custom);
      setOperationAction(ISD::SMAX, MVT::v16i16, Custom);
      setOperationAction(ISD::SMAX, MVT::v8i32, Custom);
      setOperationAction(ISD::UMAX, MVT::v32i8, Custom);
      setOperationAction(ISD::UMAX, MVT::v16i16, Custom);
      setOperationAction(ISD::UMAX, MVT::v8i32, Custom);
      setOperationAction(ISD::SMIN, MVT::v32i8, Custom);
      setOperationAction(ISD::SMIN, MVT::v16i16, Custom);
      setOperationAction(ISD::SMIN, MVT::v8i32, Custom);
      setOperationAction(ISD::UMIN, MVT::v32i8, Custom);
      setOperationAction(ISD::UMIN, MVT::v16i16, Custom);
      setOperationAction(ISD::UMIN, MVT::v8i32, Custom);
    }
    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v4i64, Custom);
    setOperationAction(ISD::SRL, MVT::v8i32, Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i64, Custom);
    setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1265 // Custom lower several nodes for 256-bit types.
1266 for (MVT VT : MVT::vector_valuetypes()) {
1267 if (VT.getScalarSizeInBits() >= 32) {
1268 setOperationAction(ISD::MLOAD, VT, Legal);
1269 setOperationAction(ISD::MSTORE, VT, Legal);
1271 // Extract subvector is special because the value type
1272 // (result) is 128-bit but the source is 256-bit wide.
1273 if (VT.is128BitVector()) {
1274 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1276 // Do not attempt to custom lower other non-256-bit vectors
1277 if (!VT.is256BitVector())
1280 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1281 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1282 setOperationAction(ISD::VSELECT, VT, Custom);
1283 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1284 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1285 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1286 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1287 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1288 }
1290 if (Subtarget->hasInt256())
1291 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1293 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1294 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1295 setOperationAction(ISD::AND, VT, Promote);
1296 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1297 setOperationAction(ISD::OR, VT, Promote);
1298 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1299 setOperationAction(ISD::XOR, VT, Promote);
1300 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1301 setOperationAction(ISD::LOAD, VT, Promote);
1302 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1303 setOperationAction(ISD::SELECT, VT, Promote);
1304 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1305 }
1307 }
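// Illustrative example (editor's note): this promotion is sound because the
// operations are bitwise. An IR-level
//   %r = and <32 x i8> %x, %y
// becomes (bitcast (and (bitcast %x to <4 x i64>), (bitcast %y to <4 x i64>))
// back to <32 x i8>), so one VPAND covers every element width.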
1308 if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
1309 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1310 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1311 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1312 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1314 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1315 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1316 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1318 for (MVT VT : MVT::fp_vector_valuetypes())
1319 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
1324 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
1326 setLoadExtAction(ISD::SEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
1327 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
1328 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
1329 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
1330 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
1331 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
1332 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
1334 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1335 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1336 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
1337 setOperationAction(ISD::XOR, MVT::i1, Legal);
1338 setOperationAction(ISD::OR, MVT::i1, Legal);
1339 setOperationAction(ISD::AND, MVT::i1, Legal);
1340 setOperationAction(ISD::SUB, MVT::i1, Custom);
1341 setOperationAction(ISD::ADD, MVT::i1, Custom);
1342 setOperationAction(ISD::MUL, MVT::i1, Custom);
1343 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1344 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1345 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1346 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1347 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1349 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1350 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1351 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1352 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1353 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1354 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1355 setOperationAction(ISD::FABS, MVT::v16f32, Custom);
1357 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1358 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1359 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1360 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1361 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1362 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1363 setOperationAction(ISD::FABS, MVT::v8f64, Custom);
1364 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1365 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1367 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1368 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1369 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1370 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1371 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1372 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1373 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1374 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1375 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1376 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1377 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1378 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1379 setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
1380 setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Custom);
1381 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1382 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1384 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1385 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1386 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1387 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1388 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1389 if (Subtarget->hasVLX()){
1390 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1391 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1392 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1393 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1394 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1396 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1397 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1398 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1399 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1400 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1401 } else {
1402 setOperationAction(ISD::MLOAD, MVT::v8i32, Custom);
1403 setOperationAction(ISD::MLOAD, MVT::v8f32, Custom);
1404 setOperationAction(ISD::MSTORE, MVT::v8i32, Custom);
1405 setOperationAction(ISD::MSTORE, MVT::v8f32, Custom);
1406 }
1407 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1408 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1409 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1410 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
1411 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
1412 if (Subtarget->hasDQI()) {
1413 setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
1414 setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
1416 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1417 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1418 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1419 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1420 if (Subtarget->hasVLX()) {
1421 setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Legal);
1422 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1423 setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Legal);
1424 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1425 setOperationAction(ISD::FP_TO_SINT, MVT::v4i64, Legal);
1426 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1427 setOperationAction(ISD::FP_TO_UINT, MVT::v4i64, Legal);
1428 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1429 }
1430 }
1431 if (Subtarget->hasVLX()) {
1432 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1433 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1434 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1435 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1436 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1437 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1438 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1439 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1440 }
1441 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1442 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1443 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1444 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1445 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1446 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1447 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1448 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1449 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1450 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1451 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1452 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1453 if (Subtarget->hasDQI()) {
1454 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Custom);
1455 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Custom);
1456 }
1457 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1458 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1459 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1460 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1461 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1462 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1463 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1464 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1465 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1466 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1468 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1469 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1470 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1471 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1472 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
1474 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1475 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1477 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1479 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1480 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1481 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
1482 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1483 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1484 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1485 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1487 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1488 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1489 setOperationAction(ISD::SELECT, MVT::v16i1, Custom);
1490 setOperationAction(ISD::SELECT, MVT::v8i1, Custom);
1492 setOperationAction(ISD::SMAX, MVT::v16i32, Legal);
1493 setOperationAction(ISD::SMAX, MVT::v8i64, Legal);
1494 setOperationAction(ISD::UMAX, MVT::v16i32, Legal);
1495 setOperationAction(ISD::UMAX, MVT::v8i64, Legal);
1496 setOperationAction(ISD::SMIN, MVT::v16i32, Legal);
1497 setOperationAction(ISD::SMIN, MVT::v8i64, Legal);
1498 setOperationAction(ISD::UMIN, MVT::v16i32, Legal);
1499 setOperationAction(ISD::UMIN, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1502 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1504 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1505 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1507 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1509 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1510 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1512 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1513 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1515 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1516 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1518 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1519 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1521 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1522 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1523 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1525 if (Subtarget->hasCDI()) {
1526 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1527 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1528 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Expand);
1529 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i32, Expand);
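// For example (illustrative only), with CDI a request such as
//   %c = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %v, i1 false)
// maps directly onto VPLZCNTD; the *_ZERO_UNDEF flavor is expanded into the
// plain form because the instruction already defines ctlz(0) = 32.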
1531 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1532 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1533 setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
1534 setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
1535 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i16, Expand);
1536 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i8, Expand);
1537 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i16, Expand);
1538 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i8, Expand);
1540 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
1541 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
1543 if (Subtarget->hasVLX()) {
1544 setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
1545 setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
1546 setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
1547 setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
1548 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
1549 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
1550 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
1551 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
1553 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
1554 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
1555 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
1556 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
1557 } else {
1558 setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
1559 setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
1560 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1561 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1562 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
1563 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
1564 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
1565 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
1566 }
1567 } // Subtarget->hasCDI()
1569 if (Subtarget->hasDQI()) {
1570 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1571 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1572 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1573 }
1574 // Custom lower several nodes.
1575 for (MVT VT : MVT::vector_valuetypes()) {
1576 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1577 if (EltSize == 1) {
1578 setOperationAction(ISD::AND, VT, Legal);
1579 setOperationAction(ISD::OR, VT, Legal);
1580 setOperationAction(ISD::XOR, VT, Legal);
1581 }
1582 if ((VT.is128BitVector() || VT.is256BitVector()) && EltSize >= 32) {
1583 setOperationAction(ISD::MGATHER, VT, Custom);
1584 setOperationAction(ISD::MSCATTER, VT, Custom);
1585 }
1586 // Extract subvector is special because the value type
1587 // (result) is 256/128-bit but the source is 512-bit wide.
1588 if (VT.is128BitVector() || VT.is256BitVector()) {
1589 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1590 }
1591 if (VT.getVectorElementType() == MVT::i1)
1592 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1594 // Do not attempt to custom lower other non-512-bit vectors
1595 if (!VT.is512BitVector())
1596 continue;
1598 if (EltSize >= 32) {
1599 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1600 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1601 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1602 setOperationAction(ISD::VSELECT, VT, Legal);
1603 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1604 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1605 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1606 setOperationAction(ISD::MLOAD, VT, Legal);
1607 setOperationAction(ISD::MSTORE, VT, Legal);
1608 setOperationAction(ISD::MGATHER, VT, Legal);
1609 setOperationAction(ISD::MSCATTER, VT, Custom);
1610 }
1611 }
1612 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
1613 setOperationAction(ISD::SELECT, VT, Promote);
1614 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1615 }
1617 }
1618 if (!Subtarget->useSoftFloat() && Subtarget->hasBWI()) {
1619 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1620 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1622 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1623 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1625 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1626 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1627 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1628 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1629 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1630 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1631 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1632 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1633 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1634 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1635 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1636 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1637 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1638 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1639 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1640 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1641 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1642 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Custom);
1643 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Custom);
1644 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1645 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1646 setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
1647 setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
1648 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1649 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1650 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1651 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1652 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1653 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1654 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1655 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1656 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i1, Custom);
1657 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i1, Custom);
1658 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1659 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1660 setOperationAction(ISD::VSELECT, MVT::v32i16, Legal);
1661 setOperationAction(ISD::VSELECT, MVT::v64i8, Legal);
1662 setOperationAction(ISD::TRUNCATE, MVT::v32i1, Custom);
1663 setOperationAction(ISD::TRUNCATE, MVT::v64i1, Custom);
1664 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1665 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i1, Custom);
1666 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i1, Custom);
1668 setOperationAction(ISD::SMAX, MVT::v64i8, Legal);
1669 setOperationAction(ISD::SMAX, MVT::v32i16, Legal);
1670 setOperationAction(ISD::UMAX, MVT::v64i8, Legal);
1671 setOperationAction(ISD::UMAX, MVT::v32i16, Legal);
1672 setOperationAction(ISD::SMIN, MVT::v64i8, Legal);
1673 setOperationAction(ISD::SMIN, MVT::v32i16, Legal);
1674 setOperationAction(ISD::UMIN, MVT::v64i8, Legal);
1675 setOperationAction(ISD::UMIN, MVT::v32i16, Legal);
1677 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1678 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1679 if (Subtarget->hasVLX())
1680 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1682 if (Subtarget->hasCDI()) {
1683 setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
1684 setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
1685 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Expand);
1686 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v64i8, Expand);
1687 }
1689 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1690 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1691 setOperationAction(ISD::VSELECT, VT, Legal);
1692 setOperationAction(ISD::SRL, VT, Custom);
1693 setOperationAction(ISD::SHL, VT, Custom);
1694 setOperationAction(ISD::SRA, VT, Custom);
1696 setOperationAction(ISD::AND, VT, Promote);
1697 AddPromotedToType (ISD::AND, VT, MVT::v8i64);
1698 setOperationAction(ISD::OR, VT, Promote);
1699 AddPromotedToType (ISD::OR, VT, MVT::v8i64);
1700 setOperationAction(ISD::XOR, VT, Promote);
1701 AddPromotedToType (ISD::XOR, VT, MVT::v8i64);
1702 }
1704 }
1705 if (!Subtarget->useSoftFloat() && Subtarget->hasVLX()) {
1706 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1707 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1709 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1710 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1711 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
1712 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1713 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
1714 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
1715 setOperationAction(ISD::SELECT, MVT::v4i1, Custom);
1716 setOperationAction(ISD::SELECT, MVT::v2i1, Custom);
1717 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
1718 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i1, Custom);
1719 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i1, Custom);
1720 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
1722 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1723 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1724 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1725 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1726 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1727 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1728 setOperationAction(ISD::SRA, MVT::v2i64, Custom);
1729 setOperationAction(ISD::SRA, MVT::v4i64, Custom);
1731 setOperationAction(ISD::SMAX, MVT::v2i64, Legal);
1732 setOperationAction(ISD::SMAX, MVT::v4i64, Legal);
1733 setOperationAction(ISD::UMAX, MVT::v2i64, Legal);
1734 setOperationAction(ISD::UMAX, MVT::v4i64, Legal);
1735 setOperationAction(ISD::SMIN, MVT::v2i64, Legal);
1736 setOperationAction(ISD::SMIN, MVT::v4i64, Legal);
1737 setOperationAction(ISD::UMIN, MVT::v2i64, Legal);
1738 setOperationAction(ISD::UMIN, MVT::v4i64, Legal);
1739 }
1741 // We want to custom lower some of our intrinsics.
1742 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1743 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1744 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1745 if (!Subtarget->is64Bit()) {
1746 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1747 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
1748 }
1750 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1751 // handle type legalization for these operations here.
1753 // FIXME: We really should do custom legalization for addition and
1754 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1755 // than generic legalization for 64-bit multiplication-with-overflow, though.
1756 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1757 if (VT == MVT::i64 && !Subtarget->is64Bit())
1758 continue;
1759 // Add/Sub/Mul with overflow operations are custom lowered.
1760 setOperationAction(ISD::SADDO, VT, Custom);
1761 setOperationAction(ISD::UADDO, VT, Custom);
1762 setOperationAction(ISD::SSUBO, VT, Custom);
1763 setOperationAction(ISD::USUBO, VT, Custom);
1764 setOperationAction(ISD::SMULO, VT, Custom);
1765 setOperationAction(ISD::UMULO, VT, Custom);
1766 }
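// Illustrative example (editor's note): the custom lowering turns
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// into an X86-specific add that also produces EFLAGS plus a SETO to
// materialize the i1 overflow bit, instead of the generic compare-based
// expansion.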
1768 if (!Subtarget->is64Bit()) {
1769 // These libcalls are not available in 32-bit.
1770 setLibcallName(RTLIB::SHL_I128, nullptr);
1771 setLibcallName(RTLIB::SRL_I128, nullptr);
1772 setLibcallName(RTLIB::SRA_I128, nullptr);
1773 }
1775 // Combine sin / cos into one node or libcall if possible.
1776 if (Subtarget->hasSinCos()) {
1777 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1778 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1779 if (Subtarget->isTargetDarwin()) {
1780 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1781 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1782 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1783 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1784 }
1785 }
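// Illustrative example (editor's note): after the sincos combine, IR such as
//   %s = call double @sin(double %x)
//   %c = call double @cos(double %x)
// becomes one FSINCOS node; the Darwin custom lowering then emits a single
// call to __sincos_stret, which returns both results in registers rather
// than through out-parameters.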
1787 if (Subtarget->isTargetWin64()) {
1788 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1789 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1790 setOperationAction(ISD::SREM, MVT::i128, Custom);
1791 setOperationAction(ISD::UREM, MVT::i128, Custom);
1792 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1793 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1794 }
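// For example (illustrative only; the helper names are the usual compiler-rt
// ones, not spelled out here), a Win64
//   %q = sdiv i128 %a, %b
// cannot be lowered as an ordinary libcall because the Win64 ABI passes
// types this large indirectly, so the custom lowering builds the call with
// the i128 arguments passed by pointer.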
1796 // We have target-specific dag combine patterns for the following nodes:
1797 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1798 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1799 setTargetDAGCombine(ISD::BITCAST);
1800 setTargetDAGCombine(ISD::VSELECT);
1801 setTargetDAGCombine(ISD::SELECT);
1802 setTargetDAGCombine(ISD::SHL);
1803 setTargetDAGCombine(ISD::SRA);
1804 setTargetDAGCombine(ISD::SRL);
1805 setTargetDAGCombine(ISD::OR);
1806 setTargetDAGCombine(ISD::AND);
1807 setTargetDAGCombine(ISD::ADD);
1808 setTargetDAGCombine(ISD::FADD);
1809 setTargetDAGCombine(ISD::FSUB);
1810 setTargetDAGCombine(ISD::FNEG);
1811 setTargetDAGCombine(ISD::FMA);
1812 setTargetDAGCombine(ISD::FMINNUM);
1813 setTargetDAGCombine(ISD::FMAXNUM);
1814 setTargetDAGCombine(ISD::SUB);
1815 setTargetDAGCombine(ISD::LOAD);
1816 setTargetDAGCombine(ISD::MLOAD);
1817 setTargetDAGCombine(ISD::STORE);
1818 setTargetDAGCombine(ISD::MSTORE);
1819 setTargetDAGCombine(ISD::TRUNCATE);
1820 setTargetDAGCombine(ISD::ZERO_EXTEND);
1821 setTargetDAGCombine(ISD::ANY_EXTEND);
1822 setTargetDAGCombine(ISD::SIGN_EXTEND);
1823 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1824 setTargetDAGCombine(ISD::SINT_TO_FP);
1825 setTargetDAGCombine(ISD::UINT_TO_FP);
1826 setTargetDAGCombine(ISD::SETCC);
1827 setTargetDAGCombine(ISD::BUILD_VECTOR);
1828 setTargetDAGCombine(ISD::MUL);
1829 setTargetDAGCombine(ISD::XOR);
1830 setTargetDAGCombine(ISD::MSCATTER);
1831 setTargetDAGCombine(ISD::MGATHER);
1833 computeRegisterProperties(Subtarget->getRegisterInfo());
1835 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1836 MaxStoresPerMemsetOptSize = 8;
1837 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1838 MaxStoresPerMemcpyOptSize = 4;
1839 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1840 MaxStoresPerMemmoveOptSize = 4;
1841 setPrefLoopAlignment(4); // 2^4 bytes.
1843 // A predictable cmov does not hurt on an in-order CPU.
1844 // FIXME: Use a CPU attribute to trigger this, not a CPU model.
1845 PredictableSelectIsExpensive = !Subtarget->isAtom();
1846 EnableExtLdPromotion = true;
1847 setPrefFunctionAlignment(4); // 2^4 bytes.
1849 verifyIntrinsicTables();
1850 }
1852 // This has so far only been implemented for 64-bit MachO.
1853 bool X86TargetLowering::useLoadStackGuardNode() const {
1854 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1855 }
1857 TargetLoweringBase::LegalizeTypeAction
1858 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1859 if (ExperimentalVectorWideningLegalization &&
1860 VT.getVectorNumElements() != 1 &&
1861 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1862 return TypeWidenVector;
1864 return TargetLoweringBase::getPreferredVectorAction(VT);
1865 }
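// Illustrative example (editor's note): under this experimental flag an
// illegal type such as <2 x i16> is widened to <8 x i16> (one XMM register)
// instead of being promoted element-wise to <2 x i32>, which usually avoids
// the pack/unpack traffic that promotion implies.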
1867 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1868 EVT VT) const {
1869 if (!VT.isVector())
1870 return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
1872 if (VT.isSimple()) {
1873 MVT VVT = VT.getSimpleVT();
1874 const unsigned NumElts = VVT.getVectorNumElements();
1875 const MVT EltVT = VVT.getVectorElementType();
1876 if (VVT.is512BitVector()) {
1877 if (Subtarget->hasAVX512())
1878 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1879 EltVT == MVT::f32 || EltVT == MVT::f64)
1880 switch(NumElts) {
1881 case 8: return MVT::v8i1;
1882 case 16: return MVT::v16i1;
1883 }
1884 if (Subtarget->hasBWI())
1885 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1886 switch(NumElts) {
1887 case 32: return MVT::v32i1;
1888 case 64: return MVT::v64i1;
1889 }
1890 }
1892 if (VVT.is256BitVector() || VVT.is128BitVector()) {
1893 if (Subtarget->hasVLX())
1894 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1895 EltVT == MVT::f32 || EltVT == MVT::f64)
1896 switch(NumElts) {
1897 case 2: return MVT::v2i1;
1898 case 4: return MVT::v4i1;
1899 case 8: return MVT::v8i1;
1900 }
1901 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1902 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1903 switch(NumElts) {
1904 case 8: return MVT::v8i1;
1905 case 16: return MVT::v16i1;
1906 case 32: return MVT::v32i1;
1907 }
1908 }
1909 }
1911 return VT.changeVectorElementTypeToInteger();
1912 }
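// For example (illustrative only): with AVX-512 a compare such as
//   %m = fcmp olt <16 x float> %a, %b
// gets the mask type v16i1 (a k-register); without AVX-512 the same compare
// is assigned v16i32, the usual SSE/AVX convention of all-ones/all-zeros
// integer lanes.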
1914 /// Helper for getByValTypeAlignment to determine
1915 /// the desired ByVal argument alignment.
1916 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1917 if (MaxAlign == 16)
1918 return;
1919 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1920 if (VTy->getBitWidth() == 128)
1921 MaxAlign = 16;
1922 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1923 unsigned EltAlign = 0;
1924 getMaxByValAlign(ATy->getElementType(), EltAlign);
1925 if (EltAlign > MaxAlign)
1926 MaxAlign = EltAlign;
1927 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1928 for (auto *EltTy : STy->elements()) {
1929 unsigned EltAlign = 0;
1930 getMaxByValAlign(EltTy, EltAlign);
1931 if (EltAlign > MaxAlign)
1932 MaxAlign = EltAlign;
1933 if (MaxAlign == 16)
1934 break;
1935 }
1936 }
1937 }
1939 /// Return the desired alignment for ByVal aggregate
1940 /// function arguments in the caller parameter area. For X86, aggregates
1941 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1942 /// are at 4-byte boundaries.
1943 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
1944 const DataLayout &DL) const {
1945 if (Subtarget->is64Bit()) {
1946 // Max of 8 and alignment of type.
1947 unsigned TyAlign = DL.getABITypeAlignment(Ty);
1948 if (TyAlign > 8)
1949 return TyAlign;
1950 return 8;
1951 }
1953 unsigned Align = 4;
1954 if (Subtarget->hasSSE1())
1955 getMaxByValAlign(Ty, Align);
1956 return Align;
1957 }
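// Worked example (editor's note): on 32-bit x86 with SSE enabled, a byval
// argument of type
//   struct S { int i; __m128 v; };
// reports 16 here because getMaxByValAlign finds the 128-bit vector member,
// while a struct of plain ints stays at the 4-byte default.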
1959 /// Returns the target specific optimal type for load
1960 /// and store operations as a result of memset, memcpy, and memmove
1961 /// lowering. If DstAlign is zero, that means the destination alignment can
1962 /// satisfy any constraint. Similarly, if SrcAlign is zero it
1963 /// means there isn't a need to check it against alignment requirement,
1964 /// probably because the source does not need to be loaded. If 'IsMemset' is
1965 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1966 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1967 /// source is constant so it does not need to be loaded.
1968 /// It returns EVT::Other if the type should be determined using generic
1969 /// target-independent logic.
1970 EVT
1971 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1972 unsigned DstAlign, unsigned SrcAlign,
1973 bool IsMemset, bool ZeroMemset,
1974 bool MemcpyStrSrc,
1975 MachineFunction &MF) const {
1976 const Function *F = MF.getFunction();
1977 if ((!IsMemset || ZeroMemset) &&
1978 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1979 if (Size >= 16 &&
1980 (!Subtarget->isUnalignedMem16Slow() ||
1981 ((DstAlign == 0 || DstAlign >= 16) &&
1982 (SrcAlign == 0 || SrcAlign >= 16)))) {
1983 if (Size >= 32) {
1984 // FIXME: Check if unaligned 32-byte accesses are slow.
1985 if (Subtarget->hasInt256())
1986 return MVT::v8i32;
1987 if (Subtarget->hasFp256())
1988 return MVT::v8f32;
1989 }
1990 if (Subtarget->hasSSE2())
1991 return MVT::v4i32;
1992 if (Subtarget->hasSSE1())
1993 return MVT::v4f32;
1994 } else if (!MemcpyStrSrc && Size >= 8 &&
1995 !Subtarget->is64Bit() &&
1996 Subtarget->hasSSE2()) {
1997 // Do not use f64 to lower memcpy if source is string constant. It's
1998 // better to use i32 to avoid the loads.
1999 return MVT::f64;
2000 }
2001 }
2002 // This is a compromise. If we reach here, unaligned accesses may be slow on
2003 // this target. However, creating smaller, aligned accesses could be even
2004 // slower and would certainly be a lot more code.
2005 if (Subtarget->is64Bit() && Size >= 8)
2006 return MVT::i64;
2007 return MVT::i32;
2008 }
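// Worked example (editor's note): a 32-byte memcpy with unknown alignment on
// an AVX2 target whose unaligned 16-byte accesses are fast returns v8i32, so
// the copy becomes one 32-byte vector load/store pair; the same call on a
// plain x86-64 target falls through to i64 and emits four 8-byte moves.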
2010 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2011 if (VT == MVT::f32)
2012 return X86ScalarSSEf32;
2013 else if (VT == MVT::f64)
2014 return X86ScalarSSEf64;
2015 return true;
2016 }
2018 bool
2019 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
2020 unsigned,
2021 unsigned,
2022 bool *Fast) const {
2023 if (Fast) {
2024 switch (VT.getSizeInBits()) {
2025 default:
2026 // 8-byte and under are always assumed to be fast.
2027 *Fast = true;
2028 break;
2029 case 128:
2030 *Fast = !Subtarget->isUnalignedMem16Slow();
2031 break;
2032 case 256:
2033 *Fast = !Subtarget->isUnalignedMem32Slow();
2034 break;
2035 // TODO: What about AVX-512 (512-bit) accesses?
2036 }
2037 }
2038 // Misaligned accesses of any size are always allowed.
2039 return true;
2040 }
2042 /// Return the entry encoding for a jump table in the
2043 /// current function. The returned value is a member of the
2044 /// MachineJumpTableInfo::JTEntryKind enum.
2045 unsigned X86TargetLowering::getJumpTableEncoding() const {
2046 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2047 // symbol.
2048 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2049 Subtarget->isPICStyleGOT())
2050 return MachineJumpTableInfo::EK_Custom32;
2052 // Otherwise, use the normal jump table encoding heuristics.
2053 return TargetLowering::getJumpTableEncoding();
2054 }
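// For example (illustrative only), a jump table entry emitted under
// EK_Custom32 in 32-bit GOT-PIC mode looks like
//   .long .LBB0_2@GOTOFF
// i.e. a 32-bit offset from the GOT base instead of an absolute address,
// keeping the table position-independent.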
2056 bool X86TargetLowering::useSoftFloat() const {
2057 return Subtarget->useSoftFloat();
2058 }
2060 const MCExpr *
2061 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2062 const MachineBasicBlock *MBB,
2063 unsigned uid,MCContext &Ctx) const{
2064 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
2065 Subtarget->isPICStyleGOT());
2066 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2067 // entries.
2068 return MCSymbolRefExpr::create(MBB->getSymbol(),
2069 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2070 }
2072 /// Returns relocation base for the given PIC jumptable.
2073 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2074 SelectionDAG &DAG) const {
2075 if (!Subtarget->is64Bit())
2076 // This doesn't have SDLoc associated with it, but is not really the
2077 // same as a Register.
2078 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2079 getPointerTy(DAG.getDataLayout()));
2080 return Table;
2081 }
2083 /// This returns the relocation base for the given PIC jumptable,
2084 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2085 const MCExpr *X86TargetLowering::
2086 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2087 MCContext &Ctx) const {
2088 // X86-64 uses RIP relative addressing based on the jump table label.
2089 if (Subtarget->isPICStyleRIPRel())
2090 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2092 // Otherwise, the reference is relative to the PIC base.
2093 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2094 }
2096 std::pair<const TargetRegisterClass *, uint8_t>
2097 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2098 MVT VT) const {
2099 const TargetRegisterClass *RRC = nullptr;
2100 uint8_t Cost = 1;
2101 switch (VT.SimpleTy) {
2102 default:
2103 return TargetLowering::findRepresentativeClass(TRI, VT);
2104 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2105 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2106 break;
2107 case MVT::x86mmx:
2108 RRC = &X86::VR64RegClass;
2109 break;
2110 case MVT::f32: case MVT::f64:
2111 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2112 case MVT::v4f32: case MVT::v2f64:
2113 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
2114 case MVT::v4f64:
2115 RRC = &X86::VR128RegClass;
2116 break;
2117 }
2118 return std::make_pair(RRC, Cost);
2119 }
2121 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
2122 unsigned &Offset) const {
2123 if (!Subtarget->isTargetLinux())
2124 return false;
2126 if (Subtarget->is64Bit()) {
2127 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
2128 Offset = 0x28;
2129 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
2130 AddressSpace = 256;
2131 else
2132 AddressSpace = 257;
2133 } else {
2134 // %gs:0x14 on i386
2135 Offset = 0x14;
2136 AddressSpace = 256;
2137 }
2138 return true;
2139 }
2141 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2142 if (!Subtarget->isTargetAndroid())
2143 return TargetLowering::getSafeStackPointerLocation(IRB);
2145 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2146 // definition of TLS_SLOT_SAFESTACK in
2147 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2148 unsigned AddressSpace, Offset;
2149 if (Subtarget->is64Bit()) {
2150 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2151 Offset = 0x48;
2152 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
2153 AddressSpace = 256;
2154 else
2155 AddressSpace = 257;
2156 } else {
2157 // %gs:0x24 on i386
2158 Offset = 0x24;
2159 AddressSpace = 256;
2160 }
2162 return ConstantExpr::getIntToPtr(
2163 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2164 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2165 }
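// Illustrative result (editor's note): on x86-64 Android this produces the
// constant expression
//   inttoptr (i32 72 to i8* addrspace(257)*)
// i.e. the unsafe-stack pointer lives at %fs:0x48, mirroring the stack
// cookie logic above.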
2167 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2168 unsigned DestAS) const {
2169 assert(SrcAS != DestAS && "Expected different address spaces!");
2171 return SrcAS < 256 && DestAS < 256;
2172 }
2174 //===----------------------------------------------------------------------===//
2175 // Return Value Calling Convention Implementation
2176 //===----------------------------------------------------------------------===//
2178 #include "X86GenCallingConv.inc"
2180 bool X86TargetLowering::CanLowerReturn(
2181 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2182 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2183 SmallVector<CCValAssign, 16> RVLocs;
2184 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2185 return CCInfo.CheckReturn(Outs, RetCC_X86);
2186 }
2188 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2189 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2190 return ScratchRegs;
2191 }
2193 SDValue
2194 X86TargetLowering::LowerReturn(SDValue Chain,
2195 CallingConv::ID CallConv, bool isVarArg,
2196 const SmallVectorImpl<ISD::OutputArg> &Outs,
2197 const SmallVectorImpl<SDValue> &OutVals,
2198 SDLoc dl, SelectionDAG &DAG) const {
2199 MachineFunction &MF = DAG.getMachineFunction();
2200 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2202 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2203 report_fatal_error("X86 interrupts may not return any value");
2205 SmallVector<CCValAssign, 16> RVLocs;
2206 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2207 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2209 SDValue Flag;
2210 SmallVector<SDValue, 6> RetOps;
2211 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2212 // Operand #1 = Bytes To Pop
2213 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2214 MVT::i16));
2216 // Copy the result values into the output registers.
2217 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2218 CCValAssign &VA = RVLocs[i];
2219 assert(VA.isRegLoc() && "Can only return in registers!");
2220 SDValue ValToCopy = OutVals[i];
2221 EVT ValVT = ValToCopy.getValueType();
2223 // Promote values to the appropriate types.
2224 if (VA.getLocInfo() == CCValAssign::SExt)
2225 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2226 else if (VA.getLocInfo() == CCValAssign::ZExt)
2227 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2228 else if (VA.getLocInfo() == CCValAssign::AExt) {
2229 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2230 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2231 else
2232 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2233 }
2234 else if (VA.getLocInfo() == CCValAssign::BCvt)
2235 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2237 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2238 "Unexpected FP-extend for return value.");
2240 // If this is x86-64, and we disabled SSE, we can't return FP values,
2241 // or SSE or MMX vectors.
2242 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2243 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2244 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2245 report_fatal_error("SSE register return with SSE disabled");
2246 }
2247 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2248 // llvm-gcc has never done it right and no one has noticed, so this
2249 // should be OK for now.
2250 if (ValVT == MVT::f64 &&
2251 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2252 report_fatal_error("SSE2 register return with SSE2 disabled");
2254 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2255 // the RET instruction and handled by the FP Stackifier.
2256 if (VA.getLocReg() == X86::FP0 ||
2257 VA.getLocReg() == X86::FP1) {
2258 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2259 // change the value to the FP stack register class.
2260 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2261 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2262 RetOps.push_back(ValToCopy);
2263 // Don't emit a copytoreg.
2264 continue;
2265 }
2267 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2268 // which is returned in RAX / RDX.
2269 if (Subtarget->is64Bit()) {
2270 if (ValVT == MVT::x86mmx) {
2271 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2272 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2273 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2274 ValToCopy);
2275 // If we don't have SSE2 available, convert to v4f32 so the generated
2276 // register is legal.
2277 if (!Subtarget->hasSSE2())
2278 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2279 }
2280 }
2281 }
2283 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2284 Flag = Chain.getValue(1);
2285 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2286 }
2288 // All x86 ABIs require that for returning structs by value we copy
2289 // the sret argument into %rax/%eax (depending on ABI) for the return.
2290 // We saved the argument into a virtual register in the entry block,
2291 // so now we copy the value out and into %rax/%eax.
2293 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2294 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2295 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2296 // either case FuncInfo->setSRetReturnReg() will have been called.
2297 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2298 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg,
2299 getPointerTy(MF.getDataLayout()));
2301 unsigned RetValReg
2302 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2303 X86::RAX : X86::EAX;
2304 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2305 Flag = Chain.getValue(1);
2307 // RAX/EAX now acts like a return value.
2308 RetOps.push_back(
2309 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2310 }
2312 RetOps[0] = Chain; // Update chain.
2314 // Add the flag if we have it.
2315 if (Flag.getNode())
2316 RetOps.push_back(Flag);
2318 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2319 if (CallConv == CallingConv::X86_INTR)
2320 opcode = X86ISD::IRET;
2321 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2322 }
2324 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2325 if (N->getNumValues() != 1)
2326 return false;
2327 if (!N->hasNUsesOfValue(1, 0))
2328 return false;
2330 SDValue TCChain = Chain;
2331 SDNode *Copy = *N->use_begin();
2332 if (Copy->getOpcode() == ISD::CopyToReg) {
2333 // If the copy has a glue operand, we conservatively assume it isn't safe to
2334 // perform a tail call.
2335 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2336 return false;
2337 TCChain = Copy->getOperand(0);
2338 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2339 return false;
2341 bool HasRet = false;
2342 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2343 UI != UE; ++UI) {
2344 if (UI->getOpcode() != X86ISD::RET_FLAG)
2345 return false;
2346 // If we are returning more than one value, we can definitely
2347 // not make a tail call; see PR19530.
2348 if (UI->getNumOperands() > 4)
2349 return false;
2350 if (UI->getNumOperands() == 4 &&
2351 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2352 return false;
2353 HasRet = true;
2354 }
2356 if (!HasRet)
2357 return false;
2359 Chain = TCChain;
2360 return true;
2361 }
2363 EVT
2364 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2365 ISD::NodeType ExtendKind) const {
2366 MVT ReturnMVT;
2367 // TODO: Is this also valid on 32-bit?
2368 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2369 ReturnMVT = MVT::i8;
2370 else
2371 ReturnMVT = MVT::i32;
2373 EVT MinVT = getRegisterType(Context, ReturnMVT);
2374 return VT.bitsLT(MinVT) ? MinVT : VT;
2375 }
2377 /// Lower the result values of a call into the
2378 /// appropriate copies out of appropriate physical registers.
2380 SDValue
2381 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2382 CallingConv::ID CallConv, bool isVarArg,
2383 const SmallVectorImpl<ISD::InputArg> &Ins,
2384 SDLoc dl, SelectionDAG &DAG,
2385 SmallVectorImpl<SDValue> &InVals) const {
2387 // Assign locations to each value returned by this call.
2388 SmallVector<CCValAssign, 16> RVLocs;
2389 bool Is64Bit = Subtarget->is64Bit();
2390 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2391 *DAG.getContext());
2392 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2394 // Copy all of the result registers out of their specified physreg.
2395 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2396 CCValAssign &VA = RVLocs[i];
2397 EVT CopyVT = VA.getLocVT();
2399 // If this is x86-64, and we disabled SSE, we can't return FP values
2400 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2401 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2402 report_fatal_error("SSE register return with SSE disabled");
2403 }
2405 // If we prefer to use the value in xmm registers, copy it out as f80 and
2406 // use a truncate to move it from fp stack reg to xmm reg.
2407 bool RoundAfterCopy = false;
2408 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2409 isScalarFPTypeInSSEReg(VA.getValVT())) {
2410 CopyVT = MVT::f80;
2411 RoundAfterCopy = (CopyVT != VA.getLocVT());
2412 }
2414 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2415 CopyVT, InFlag).getValue(1);
2416 SDValue Val = Chain.getValue(0);
2418 if (RoundAfterCopy)
2419 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2420 // This truncation won't change the value.
2421 DAG.getIntPtrConstant(1, dl));
2423 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
2424 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2426 InFlag = Chain.getValue(2);
2427 InVals.push_back(Val);
2428 }
2430 return Chain;
2431 }
2433 //===----------------------------------------------------------------------===//
2434 // C & StdCall & Fast Calling Convention implementation
2435 //===----------------------------------------------------------------------===//
2436 // The StdCall calling convention seems to be standard for many Windows API
2437 // routines and elsewhere. It differs from the C calling convention just a little:
2438 // the callee should clean up the stack, not the caller. Symbols should also be
2439 // decorated in some fancy way :) It doesn't support any vector arguments.
2440 // For info on fast calling convention see Fast Calling Convention (tail call)
2441 // implementation LowerX86_32FastCCCallTo.
2443 /// CallIsStructReturn - Determines whether a call uses struct return
2444 /// semantics.
2445 enum StructReturnType {
2446 NotStructReturn,
2447 RegStructReturn,
2448 StackStructReturn
2449 };
2450 static StructReturnType
2451 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsMCU) {
2452 if (Outs.empty())
2453 return NotStructReturn;
2455 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2456 if (!Flags.isSRet())
2457 return NotStructReturn;
2458 if (Flags.isInReg() || IsMCU)
2459 return RegStructReturn;
2460 return StackStructReturn;
2461 }
2463 /// Determines whether a function uses struct return semantics.
2464 static StructReturnType
2465 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsMCU) {
2466 if (Ins.empty())
2467 return NotStructReturn;
2469 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2470 if (!Flags.isSRet())
2471 return NotStructReturn;
2472 if (Flags.isInReg() || IsMCU)
2473 return RegStructReturn;
2474 return StackStructReturn;
2475 }
2477 /// Make a copy of an aggregate at address specified by "Src" to address
2478 /// "Dst" with size and alignment information specified by the specific
2479 /// parameter attribute. The copy will be passed as a byval function parameter.
2480 static SDValue
2481 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2482 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2483 SDLoc dl) {
2484 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2486 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2487 /*isVolatile*/false, /*AlwaysInline=*/true,
2488 /*isTailCall*/false,
2489 MachinePointerInfo(), MachinePointerInfo());
2490 }
2492 /// Return true if the calling convention is one that we can guarantee TCO for.
2493 static bool canGuaranteeTCO(CallingConv::ID CC) {
2494 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2495 CC == CallingConv::HiPE || CC == CallingConv::HHVM);
2496 }
2498 /// Return true if we might ever do TCO for calls with this calling convention.
2499 static bool mayTailCallThisCC(CallingConv::ID CC) {
2500 switch (CC) {
2501 // C calling conventions:
2502 case CallingConv::C:
2503 case CallingConv::X86_64_Win64:
2504 case CallingConv::X86_64_SysV:
2505 // Callee pop conventions:
2506 case CallingConv::X86_ThisCall:
2507 case CallingConv::X86_StdCall:
2508 case CallingConv::X86_VectorCall:
2509 case CallingConv::X86_FastCall:
2510 return true;
2511 default:
2512 return canGuaranteeTCO(CC);
2513 }
2514 }
2516 /// Return true if the function is being made into a tailcall target by
2517 /// changing its ABI.
2518 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2519 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
2520 }
2522 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2523 auto Attr =
2524 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2525 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2526 return false;
2528 CallSite CS(CI);
2529 CallingConv::ID CalleeCC = CS.getCallingConv();
2530 if (!mayTailCallThisCC(CalleeCC))
2531 return false;
2533 return true;
2534 }
2536 SDValue
2537 X86TargetLowering::LowerMemArgument(SDValue Chain,
2538 CallingConv::ID CallConv,
2539 const SmallVectorImpl<ISD::InputArg> &Ins,
2540 SDLoc dl, SelectionDAG &DAG,
2541 const CCValAssign &VA,
2542 MachineFrameInfo *MFI,
2543 unsigned i) const {
2544 // Create the nodes corresponding to a load from this parameter slot.
2545 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2546 bool AlwaysUseMutable = shouldGuaranteeTCO(
2547 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2548 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2550 EVT ValVT;
2551 // If value is passed by pointer we have address passed instead of the value
2552 // itself.
2553 bool ExtendedInMem = VA.isExtInLoc() &&
2554 VA.getValVT().getScalarType() == MVT::i1;
2556 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
2557 ValVT = VA.getLocVT();
2558 else
2559 ValVT = VA.getValVT();
2561 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
2562 // taken by a return address.
2563 int Offset = 0;
2564 if (CallConv == CallingConv::X86_INTR) {
2565 const X86Subtarget& Subtarget =
2566 static_cast<const X86Subtarget&>(DAG.getSubtarget());
2567 // X86 interrupts may take one or two arguments.
2568 // On the stack there will be no return address as in regular call.
2569 // The offset of the last argument needs to be set to -4/-8 bytes.
2570 // The offset of the first argument (when there are two) should be set to 0 bytes.
2571 Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
2572 }
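// Worked example (editor's note): a 64-bit handler declared with two
// arguments (frame pointer, error code) has Ins.size() == 2, so the error
// code (i == 1) gets Offset = 8 * ((1+1) % 2 - 1) = -8, occupying the slot
// where the return address would normally sit, and the first argument
// (i == 0) gets Offset = 8 * ((0+1) % 2 - 1) = 0.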
2574 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2575 // changed with more analysis.
2576 // In case of tail call optimization, mark all arguments mutable, since they
2577 // could be overwritten by the lowering of arguments in case of a tail call.
2578 if (Flags.isByVal()) {
2579 unsigned Bytes = Flags.getByValSize();
2580 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2581 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2582 // Adjust SP offset of interrupt parameter.
2583 if (CallConv == CallingConv::X86_INTR) {
2584 MFI->setObjectOffset(FI, Offset);
2585 }
2586 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2587 } else {
2588 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2589 VA.getLocMemOffset(), isImmutable);
2590 // Adjust SP offset of interrupt parameter.
2591 if (CallConv == CallingConv::X86_INTR) {
2592 MFI->setObjectOffset(FI, Offset);
2593 }
2595 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2596 SDValue Val = DAG.getLoad(
2597 ValVT, dl, Chain, FIN,
2598 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
2599 false, false, 0);
2600 return ExtendedInMem ?
2601 DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val) : Val;
2602 }
2603 }
2605 // FIXME: Get this from tablegen.
2606 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2607 const X86Subtarget *Subtarget) {
2608 assert(Subtarget->is64Bit());
2610 if (Subtarget->isCallingConvWin64(CallConv)) {
2611 static const MCPhysReg GPR64ArgRegsWin64[] = {
2612 X86::RCX, X86::RDX, X86::R8, X86::R9
2613 };
2614 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2615 }
2617 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2618 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2619 };
2620 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2621 }
2623 // FIXME: Get this from tablegen.
2624 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2625 CallingConv::ID CallConv,
2626 const X86Subtarget *Subtarget) {
2627 assert(Subtarget->is64Bit());
2628 if (Subtarget->isCallingConvWin64(CallConv)) {
2629 // The XMM registers which might contain var arg parameters are shadowed
2630 // in their paired GPR. So we only need to save the GPR to their home
2631 // slots.
2632 // TODO: __vectorcall will change this.
2633 return None;
2634 }
2636 const Function *Fn = MF.getFunction();
2637 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2638 bool isSoftFloat = Subtarget->useSoftFloat();
2639 assert(!(isSoftFloat && NoImplicitFloatOps) &&
2640 "SSE register cannot be used when SSE is disabled!");
2641 if (isSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
2642 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2643 // registers.
2644 return None;
2646 static const MCPhysReg XMMArgRegs64Bit[] = {
2647 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2648 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2649 };
2650 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2651 }
2653 SDValue X86TargetLowering::LowerFormalArguments(
2654 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2655 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
2656 SmallVectorImpl<SDValue> &InVals) const {
2657 MachineFunction &MF = DAG.getMachineFunction();
2658 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2659 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2661 const Function* Fn = MF.getFunction();
2662 if (Fn->hasExternalLinkage() &&
2663 Subtarget->isTargetCygMing() &&
2664 Fn->getName() == "main")
2665 FuncInfo->setForceFramePointer(true);
2667 MachineFrameInfo *MFI = MF.getFrameInfo();
2668 bool Is64Bit = Subtarget->is64Bit();
2669 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2671 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
2672 "Var args not supported with calling convention fastcc, ghc or hipe");
2674 if (CallConv == CallingConv::X86_INTR) {
2675 bool isLegal = Ins.size() == 1 ||
2676 (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
2677 (!Is64Bit && Ins[1].VT == MVT::i32)));
2678 if (!isLegal)
2679 report_fatal_error("X86 interrupts may take one or two arguments");
2680 }
2682 // Assign locations to all of the incoming arguments.
2683 SmallVector<CCValAssign, 16> ArgLocs;
2684 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2686 // Allocate shadow area for Win64
2687 if (IsWin64)
2688 CCInfo.AllocateStack(32, 8);
2690 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2692 unsigned LastVal = ~0U;
2693 SDValue ArgValue;
2694 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2695 CCValAssign &VA = ArgLocs[i];
2696 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2697 // places.
2698 assert(VA.getValNo() != LastVal &&
2699 "Don't support value assigned to multiple locs yet");
2701 LastVal = VA.getValNo();
2703 if (VA.isRegLoc()) {
2704 EVT RegVT = VA.getLocVT();
2705 const TargetRegisterClass *RC;
2706 if (RegVT == MVT::i32)
2707 RC = &X86::GR32RegClass;
2708 else if (Is64Bit && RegVT == MVT::i64)
2709 RC = &X86::GR64RegClass;
2710 else if (RegVT == MVT::f32)
2711 RC = &X86::FR32RegClass;
2712 else if (RegVT == MVT::f64)
2713 RC = &X86::FR64RegClass;
2714 else if (RegVT == MVT::f128)
2715 RC = &X86::FR128RegClass;
2716 else if (RegVT.is512BitVector())
2717 RC = &X86::VR512RegClass;
2718 else if (RegVT.is256BitVector())
2719 RC = &X86::VR256RegClass;
2720 else if (RegVT.is128BitVector())
2721 RC = &X86::VR128RegClass;
2722 else if (RegVT == MVT::x86mmx)
2723 RC = &X86::VR64RegClass;
2724 else if (RegVT == MVT::i1)
2725 RC = &X86::VK1RegClass;
2726 else if (RegVT == MVT::v8i1)
2727 RC = &X86::VK8RegClass;
2728 else if (RegVT == MVT::v16i1)
2729 RC = &X86::VK16RegClass;
2730 else if (RegVT == MVT::v32i1)
2731 RC = &X86::VK32RegClass;
2732 else if (RegVT == MVT::v64i1)
2733 RC = &X86::VK64RegClass;
2734 else
2735 llvm_unreachable("Unknown argument type!");
2737 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2738 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2740 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2741 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2742 // right size.
2743 if (VA.getLocInfo() == CCValAssign::SExt)
2744 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2745 DAG.getValueType(VA.getValVT()));
2746 else if (VA.getLocInfo() == CCValAssign::ZExt)
2747 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2748 DAG.getValueType(VA.getValVT()));
2749 else if (VA.getLocInfo() == CCValAssign::BCvt)
2750 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
2752 if (VA.isExtInLoc()) {
2753 // Handle MMX values passed in XMM regs.
2754 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
2755 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2757 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2760 assert(VA.isMemLoc());
2761 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2764 // If value is passed via pointer - do a load.
2765 if (VA.getLocInfo() == CCValAssign::Indirect)
2766 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2767 MachinePointerInfo(), false, false, false, 0);
2769 InVals.push_back(ArgValue);
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // All x86 ABIs require that for returning structs by value we copy the
    // sret argument into %rax/%eax (depending on ABI) for the return. Save
    // the argument into a virtual register so that we can access it from the
    // return points.
    if (Ins[i].Flags.isSRet()) {
      unsigned Reg = FuncInfo->getSRetReturnReg();
      if (!Reg) {
        MVT PtrTy = getPointerTy(DAG.getDataLayout());
        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
        FuncInfo->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
      break;
    }
  }
  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  // We can skip this if there are no va_start calls.
  if (MFI->hasVAStart() &&
      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
                   CallConv != CallingConv::X86_ThisCall))) {
    FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(1, StackSize, true));
  }
  // Figure out if XMM registers are in use.
  assert(!(Subtarget->useSoftFloat() &&
           Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
         "SSE register cannot be used when SSE is disabled!");
  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (Is64Bit && isVarArg && MFI->hasVAStart()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
    assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    // Gather all the live in physical registers.
    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(
          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
    }
    if (!ArgXMMs.empty()) {
      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
        LiveXMMRegs.push_back(
            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
      }
    }

    if (IsWin64) {
      // Get to the caller-allocated home save location. Add 8 to account
      // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
    }
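    // Illustrative layout note for the SysV AMD64 side (an assumption spelled
    // out here, not taken from this file): the register save area is
    // ArgGPRs.size()*8 + ArgXMMs.size()*16 = 48 + 128 = 176 bytes. For a
    // hypothetical "int f(int a, ...)", 'a' consumes one GPR, so va_start
    // sees gp_offset = 8 and fp_offset = 48, and va_arg walks this area
    // before falling back to the caller's overflow argument area.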
    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                      getPointerTy(DAG.getDataLayout()));
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                                RSFIN, DAG.getIntPtrConstant(Offset, dl));
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(),
                           FuncInfo->getRegSaveFrameIndex(), Offset),
                       false, false, 0);
      MemOps.push_back(Store);
      Offset += 8;
    }

    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
      // Now store the XMM (fp + vector) parameter registers.
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
                               FuncInfo->getRegSaveFrameIndex(), dl));
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
                               FuncInfo->getVarArgsFPOffset(), dl));
      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
                        LiveXMMRegs.end());
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
                                   MVT::Other, SaveXMMOps));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }
  if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
    // Find the largest legal vector type.
    MVT VecVT = MVT::Other;
    // FIXME: Only some x86_32 calling conventions support AVX512.
    if (Subtarget->hasAVX512() &&
        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
      VecVT = MVT::v16f32;
    else if (Subtarget->hasAVX())
      VecVT = MVT::v8f32;
    else if (Subtarget->hasSSE2())
      VecVT = MVT::v4f32;

    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
    RegParmTypes.push_back(IntVT);
    if (VecVT != MVT::Other)
      RegParmTypes.push_back(VecVT);

    // Compute the set of forwarded registers. The rest are scratch.
    SmallVectorImpl<ForwardedRegister> &Forwards =
        FuncInfo->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

    // Conservatively forward AL on x86_64, since it might be used for varargs.
    if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
    }

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &F : Forwards) {
      // FIXME: Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
      Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
    }
  }
  // Some CCs need callee pop.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
    // X86 interrupts must pop the error code if present.
    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 8 : 4);
  } else {
    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
        !Subtarget->getTargetTriple().isOSMSVCRT() &&
        argsAreStructReturn(Ins, Subtarget->isTargetMCU()) == StackStructReturn)
      FuncInfo->setBytesToPopOnReturn(4);
  }
  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
    if (CallConv == CallingConv::X86_FastCall ||
        CallConv == CallingConv::X86_ThisCall)
      // fastcc functions can't have varargs.
      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
  }
  FuncInfo->setArgumentStackSize(StackSize);

  if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
    EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
    if (Personality == EHPersonality::CoreCLR) {
      assert(Is64Bit);
      // TODO: Add a mechanism to frame lowering that will allow us to indicate
      // that we'd prefer this slot be allocated towards the bottom of the frame
      // (i.e. near the stack pointer after allocating the frame). Every
      // funclet needs a copy of this slot in its (mostly empty) frame, and the
      // offset from the bottom of this and each funclet's frame must be the
      // same, so the size of funclets' (mostly empty) frames is dictated by
      // how far this slot is from the bottom (since they allocate just enough
      // space to accommodate holding this slot at the correct offset).
      int PSPSymFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
      EHInfo->PSPSymFrameIdx = PSPSymFI;
    }
  }

  return Chain;
}
SDValue
X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
      false, false, 0);
}
/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                           SDValue &OutRetAddr, SDValue Chain,
                                           bool IsTailCall, bool Is64Bit,
                                           int FPDiff, SDLoc dl) const {
  // Adjust the Return address stack slot.
  EVT VT = getPointerTy(DAG.getDataLayout());
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
                           false, false, false, 0);
  return SDValue(OutRetAddr.getNode(), 1);
}
/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff != 0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, SDLoc dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                           false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), NewReturnAddrFI),
                       false, false, 0);
  return Chain;
}
/// Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}
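// A sketch of the result (illustrative, not exercised here): for VT = v4f32
// the mask built above is <4, 1, 2, 3>, i.e. element 0 comes from V2 and
// elements 1..3 come from V1 -- the MOVSS semantics. For VT = v2f64 the mask
// is <2, 1>, matching MOVSD.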
SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool &isTailCall                      = CLI.IsTailCall;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit        = Subtarget->is64Bit();
  bool IsWin64        = Subtarget->isCallingConvWin64(CallConv);
  StructReturnType SR = callIsStructReturn(Outs, Subtarget->isTargetMCU());
  bool IsSibcall      = false;
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");

  if (CallConv == CallingConv::X86_INTR)
    report_fatal_error("X86 interrupts may not be called directly");

  if (Attr.getValueAsString() == "true")
    isTailCall = false;

  if (Subtarget->isPICStyleGOT() &&
      !MF.getTarget().Options.GuaranteedTailCallOpt) {
    // If we are using a GOT, disable tail calls to external symbols with
    // default visibility. Tail calling such a symbol requires using a GOT
    // relocation, which forces early binding of the symbol. This breaks code
    // that requires lazy function symbol resolution. Using musttail or
    // GuaranteedTailCallOpt will override this.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      isTailCall = false;
  }

  bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    isTailCall = true;
  } else if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, SR != NotStructReturn,
                    MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
                    Outs, OutVals, Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }
  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeCallOperands(Outs, CC_X86);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall)
    // This is a sibcall. The memory operands are available in the caller's
    // own caller's stack.
    NumBytes = 0;
  else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
           canGuaranteeTCO(CallConv))
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  int FPDiff = 0;
  if (isTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot.
    // But only set if delta is greater than previous delta.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }
  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated for us and will be right at the top of the stack. We don't
  // support multiple arguments passed in memory when using inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  }

  if (!IsSibcall)
    Chain = DAG.getCALLSEQ_START(
        Chain, DAG.getIntPtrConstant(NumBytesToPush, dl, true), dl);
  SDValue RetAddrFrIdx;
  // Load return address for tail calls.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // Skip inalloca arguments, they have already been written.
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (Flags.isInAlloca())
      continue;

    CCValAssign &VA = ArgLocs[i];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[i];
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      if (Arg.getValueType().isVector() &&
          Arg.getValueType().getVectorElementType() == MVT::i1)
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
      else if (RegVT.is128BitVector()) {
        // Special case: passing MMX values in XMM registers.
        Arg = DAG.getBitcast(MVT::i64, Arg);
        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
      } else
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(RegVT, Arg);
      break;
    case CCValAssign::Indirect: {
      // Store the argument.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      Chain = DAG.getStore(
          Chain, dl, Arg, SpillSlot,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
          false, false, 0);
      Arg = SpillSlot;
      break;
    }
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      if (isVarArg && IsWin64) {
        // The Win64 ABI requires an argument XMM reg to be copied to the
        // corresponding shadow GPR if the callee is a varargs function.
        unsigned ShadowReg = 0;
        switch (VA.getLocReg()) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
        }
        if (ShadowReg)
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
  if (Subtarget->isPICStyleGOT()) {
    // ELF / PIC requires GOT in the EBX register before function calls via PLT
    // GOT pointer.
    if (!isTailCall) {
      RegsToPass.push_back(std::make_pair(
          unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                                          getPointerTy(DAG.getDataLayout()))));
    } else {
      // If we are tail calling and generating PIC/GOT style code, load the
      // address of the callee into ECX. The value in ECX is used as the target
      // of the tail jump. This is done to circumvent the ebx/callee-saved
      // problem for tail calls on PIC/GOT architectures. Normally we would
      // just put the address of GOT into ebx and then call target@PLT. But for
      // tail calls ebx would be restored (since ebx is callee saved) before
      // jumping to the target.
      //
      // Note: The actual moving to ECX is done further down.
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G && !G->getGlobal()->hasLocalLinkage() &&
          G->getGlobal()->hasDefaultVisibility())
        Callee = LowerGlobalAddress(Callee, DAG);
      else if (isa<ExternalSymbolSDNode>(Callee))
        Callee = LowerExternalSymbol(Callee, DAG);
    }
  }
  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.
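    //
    // A hypothetical example (not from this file): for a variadic call such
    // as printf("%f", x), exactly one SSE register (XMM0) carries an
    // argument, so the code below emits the equivalent of "movb $1, %al"
    // before the call; any value in [1, 8] would also be conformant, since
    // %al only needs to be an upper bound.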
    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget->hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");

    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
                                        DAG.getConstant(NumXMMRegs, dl,
                                                        MVT::i8)));
  }
  if (isVarArg && IsMustTail) {
    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
    }
  }
  // For tail calls, lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isRegLoc())
        continue;
      assert(VA.isMemLoc());
      SDValue Arg = OutVals[i];
      ISD::ArgFlagsTy Flags = Outs[i].Flags;
      // Skip inalloca arguments. They don't require any work.
      if (Flags.isInAlloca())
        continue;
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
      FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));

      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                        getPointerTy(DAG.getDataLayout()));
        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                             StackPtr, Source);

        MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
                                                         ArgChain,
                                                         Flags, DAG, dl));
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, dl, Arg, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
            false, false, 0));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(DAG.getDataLayout()),
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }
  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
    // it.
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);

    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportStorageClass()) {
      unsigned char OpFlags = 0;
      bool ExtraLoad = false;
      unsigned WrapperKind = ISD::DELETED_NODE;

      // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
      // external symbols must go through the PLT in PIC mode. If the symbol
      // has hidden or protected visibility, or if it is static or local, then
      // we don't need to use the PLT - we can directly call it.
      if (Subtarget->isTargetELF() &&
          DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
          GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
        OpFlags = X86II::MO_PLT;
      } else if (Subtarget->isPICStyleStubAny() &&
                 !GV->isStrongDefinitionForLinker() &&
                 (!Subtarget->getTargetTriple().isMacOSX() ||
                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
        // PC-relative references to external symbols should go through $stub,
        // unless we're building with the leopard linker or later, which
        // automatically synthesizes these stubs.
        OpFlags = X86II::MO_DARWIN_STUB;
      } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
                 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
        // If the function is marked as non-lazy, generate an indirect call
        // which loads from the GOT directly. This avoids runtime overhead
        // at the cost of eager binding (and one extra byte of encoding).
        OpFlags = X86II::MO_GOTPCREL;
        WrapperKind = X86ISD::WrapperRIP;
        ExtraLoad = true;
      }

      Callee = DAG.getTargetGlobalAddress(
          GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);

      // Add a wrapper if needed.
      if (WrapperKind != ISD::DELETED_NODE)
        Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
                             getPointerTy(DAG.getDataLayout()), Callee);
      // Add extra indirection if needed.
      if (ExtraLoad)
        Callee = DAG.getLoad(
            getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()), false, false,
            false, 0);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
    // external symbols should go through the PLT.
    if (Subtarget->isTargetELF() &&
        DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
      OpFlags = X86II::MO_PLT;
    } else if (Subtarget->isPICStyleStubAny() &&
               (!Subtarget->getTargetTriple().isMacOSX() ||
                Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = X86II::MO_DARWIN_STUB;
    }

    Callee = DAG.getTargetExternalSymbol(
        S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
  } else if (Subtarget->isTarget64BitILP32() &&
             Callee->getValueType(0) == MVT::i32) {
    // Zero-extend the 32-bit Callee address into a 64-bit one per the x32 ABI.
    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
  }
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && isTailCall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (isTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  // If this is an invoke in a 32-bit function using a funclet-based
  // personality, assume the function clobbers all registers. If an exception
  // is thrown, the runtime will not restore CSRs.
  // FIXME: Model this more precisely so that we can register allocate across
  // the normal edge and spill and fill across the exceptional edge.
  if (!Is64Bit && CLI.CS && CLI.CS->isInvoke()) {
    const Function *CallerFn = MF.getFunction();
    EHPersonality Pers =
        CallerFn->hasPersonalityFn()
            ? classifyEHPersonality(CallerFn->getPersonalityFn())
            : EHPersonality::Unknown;
    if (isFuncletEHPersonality(Pers))
      Mask = RegInfo->getNoPreservedMask();
  }

  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  if (isTailCall) {
    // We used to do:
    //// If this is the first return lowered for this function, add the regs
    //// to the liveout set for the function.
    // This isn't right, although it's probably harmless on x86; liveouts
    // should be computed from returns, not tail calls. Consider a void
    // function making a tail call to a function returning int.
    MF.getFrameInfo()->setHasTailCall();
    return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
  }

  Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop;
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       DAG.getTarget().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPop = NumBytes;  // Callee pops everything.
  else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
           !Subtarget->getTargetTriple().isOSMSVCRT() &&
           SR == StackStructReturn)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
    NumBytesForCalleeToPop = 4;
  else
    NumBytesForCalleeToPop = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
                                                     true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}
//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like stdcall (the callee cleans up the arguments), except that ECX is
// reserved for storing the tail-called function's address. Only 2 registers
// are free for argument passing (inreg). Tail call optimization is performed
// provided:
//   * tailcallopt is enabled
//   * caller/callee are fastcc
// On X86_64 architecture with GOT-style position independent code only local
// (within module) calls are supported at the moment.
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
// for example.)
// If a tail-called callee has more arguments than the caller, the caller
// needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after
// the original RETADDR, but before the saved framepointer or the spilled
// registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
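// A hypothetical IR example of the guaranteed-TCO case described above (not
// taken from this file):
//   define fastcc i32 @caller(i32 inreg %a) {
//     %r = tail call fastcc i32 @callee(i32 inreg %a, i32 1, i32 2, i32 3)
//     ret i32 %r
//   }
// With -tailcallopt the call becomes a jump; since @callee takes more stack
// arguments than @caller received, the RETADDR move area is reserved as
// described.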
/// Round up the stack size so that it ends up, e.g., 16n + 12-aligned for a
/// 16-byte stack alignment and a 4-byte slot size, leaving room to push the
/// return address back on.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  unsigned SlotSize = RegInfo->getSlotSize();
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
    // Number smaller than 12 so just add the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out lower bits, add stackalignment once plus the 12 bytes.
    Offset = ((~AlignMask) & Offset) + StackAlignment +
             (StackAlignment - SlotSize);
  }
  return Offset;
}
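// Worked example (illustrative): with StackAlignment = 16 and SlotSize = 4,
// StackSize = 20 gives (20 & 15) = 4 <= 12, so Offset = 20 + (12 - 4) = 28 =
// 16*1 + 12; StackSize = 30 gives (30 & 15) = 14 > 12, so Offset =
// (30 & ~15) + 16 + 12 = 44 = 16*2 + 12. Either way the result is congruent
// to -SlotSize modulo the stack alignment.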
/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const X86InstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
           Opcode == X86::LEA64_32r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // A ByVal argument is passed in as a pointer but it's now being
      // dereferenced, e.g.:
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
    FI = FINode->getIndex();
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}
/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = MF.getFunction();

  // If the function return type is x86_fp80 and the callee return type is not,
  // then the FP_EXTEND of the call result is not a nop. It's not safe to
  // perform a tailcall optimization here.
  if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
    return false;

  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;
  bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
  bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);

  // Win64 functions have extra shadow space for argument homing. Don't do the
  // sibcall if the caller and callee have mismatched expectations for this
  // space.
  if (IsCalleeWin64 != IsCallerWin64)
    return false;

  if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  if (RegInfo->needsStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  if (isVarArg && !Outs.empty()) {
    // Optimizing for varargs on Win64 is unlikely to be safe without
    // additional testing.
    if (IsCalleeWin64 || IsCallerWin64)
      return false;

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
                   *DAG.getContext());

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // If the call result is in ST0 / ST1, it needs to be popped off the x87
  // stack. Therefore, if it's not used by the call it is not safe to optimize
  // this into a sibcall.
  bool Unused = false;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (!Ins[i].Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
                   *DAG.getContext());
    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
        return false;
    }
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
                    *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
                    *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }
  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
                   *DAG.getContext());

    // Allocate shadow area for Win64
    if (IsCalleeWin64)
      CCInfo.AllocateStack(32, 8);

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    StackArgsSize = CCInfo.getNextStackOffset();

    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII = Subtarget->getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. In 32-bit, the call address can
    // only target EAX, EDX, or ECX since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if (!Subtarget->is64Bit() &&
        ((!isa<GlobalAddressSDNode>(Callee) &&
          !isa<ExternalSymbolSDNode>(Callee)) ||
         DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs =
          (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;

      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        unsigned Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }
  }

  bool CalleeWillPop =
      X86::isCalleePop(CalleeCC, Subtarget->is64Bit(), isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
          MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}
FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return X86::createFastISel(funcInfo, libInfo);
}
//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

static bool MayFoldLoad(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}

static bool MayFoldIntoStore(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}
static bool isTargetShuffle(unsigned Opcode) {
  switch (Opcode) {
  default: return false;
  case X86ISD::BLENDI:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::SHUFP:
  case X86ISD::PALIGNR:
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERM2X128:
  case X86ISD::VPERMI:
  case X86ISD::VPERMV:
  case X86ISD::VPERMV3:
    return true;
  }
}
static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
                                    SDValue V1, unsigned TargetMask,
                                    SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERMI:
    return DAG.getNode(Opc, dl, VT, V1,
                       DAG.getConstant(TargetMask, dl, MVT::i8));
  }
}
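// Illustrative use (not from this file): getTargetShuffleNode(X86ISD::PSHUFD,
// dl, MVT::v4i32, V1, 0x1B, DAG) emits a PSHUFD with immediate 0x1B
// (0b00011011), i.e. element order <3,2,1,0>, which reverses V1.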
static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
  switch (Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
    return DAG.getNode(Opc, dl, VT, V1, V2);
  }
}
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    unsigned SlotSize = RegInfo->getSlotSize();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                           -(int64_t)SlotSize,
                                                           false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
}
bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement) {
  // The offset must fit into the 32-bit immediate field.
  if (!isInt<32>(Offset))
    return false;

  // If we don't have a symbolic displacement - we don't have any extra
  // restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // FIXME: Some tweaks might be needed for medium code model.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // For the small code model, we assume that the latest object is 16MB before
  // the end of the 31-bit boundary. We may also accept pretty large negative
  // constants knowing that all objects are in the positive half of the
  // address space.
  if (M == CodeModel::Small && Offset < 16*1024*1024)
    return true;

  // For the kernel code model, we know that all objects reside in the negative
  // half of the 32-bit address space. We may not accept negative offsets,
  // since they may be just off, but we may accept pretty large positive ones.
  if (M == CodeModel::Kernel && Offset >= 0)
    return true;

  return false;
}
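// For instance (illustrative): in the small code model "symbol + 4096" is
// accepted (4096 < 16MB), while in the kernel code model "symbol - 8" is
// rejected, since objects live in the negative 2GB and a negative offset
// could step past the boundary.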
/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
  // can guarantee TCO.
  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
    return true;

  switch (CallingConv) {
  default:
    return false;
  case CallingConv::X86_StdCall:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_VectorCall:
    return !is64Bit;
  }
}
/// \brief Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
  switch (X86CC) {
  default: llvm_unreachable("Invalid integer condition!");
  case X86::COND_E:   return true;
  case X86::COND_G:   return false;
  case X86::COND_GE:  return false;
  case X86::COND_L:   return false;
  case X86::COND_LE:  return false;
  case X86::COND_NE:  return true;
  case X86::COND_B:   return true;
  case X86::COND_A:   return true;
  case X86::COND_BE:  return true;
  case X86::COND_AE:  return true;
  }
}
static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
  switch (SetCCOpcode) {
  default: llvm_unreachable("Invalid integer condition!");
  case ISD::SETEQ:  return X86::COND_E;
  case ISD::SETGT:  return X86::COND_G;
  case ISD::SETGE:  return X86::COND_GE;
  case ISD::SETLT:  return X86::COND_L;
  case ISD::SETLE:  return X86::COND_LE;
  case ISD::SETNE:  return X86::COND_NE;
  case ISD::SETULT: return X86::COND_B;
  case ISD::SETUGT: return X86::COND_A;
  case ISD::SETULE: return X86::COND_BE;
  case ISD::SETUGE: return X86::COND_AE;
  }
}
/// Do a one-to-one translation of a ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP,
                               SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return X86::COND_NS;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        return X86::COND_S;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return X86::COND_LE;
      }
    }

    return TranslateIntegerX86CC(SetCCOpcode);
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
      !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  // ZF  PF  CF   op
  //  0 | 0 | 0 | X > Y
  //  0 | 0 | 1 | X < Y
  //  1 | 0 | 0 | X == Y
  //  1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:   return X86::COND_E;
  case ISD::SETOLT:  // flipped
  case ISD::SETOGT:
  case ISD::SETGT:   return X86::COND_A;
  case ISD::SETOLE:  // flipped
  case ISD::SETOGE:
  case ISD::SETGE:   return X86::COND_AE;
  case ISD::SETUGT:  // flipped
  case ISD::SETULT:
  case ISD::SETLT:   return X86::COND_B;
  case ISD::SETUGE:  // flipped
  case ISD::SETULE:
  case ISD::SETLE:   return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:   return X86::COND_NE;
  case ISD::SETUO:   return X86::COND_P;
  case ISD::SETO:    return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE:  return X86::COND_INVALID;
  }
}
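// Worked example (illustrative): for SETOLT the operands are swapped above,
// turning "X < Y (ordered)" into "Y > X (ordered)", which maps to COND_A
// (ZF = 0 and CF = 0 per the flag table) -- the JA/CMOVA family that
// UCOMISS/UCOMISD flag results require.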
/// Is there a floating point cmov for the specific X86 condition code?
/// Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}
/// Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}
bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                              ISD::LoadExtType ExtTy,
                                              EVT NewVT) const {
  // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
  // relocations must target a movq or addq instruction: don't let the load
  // shrink.
  SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
  if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
    if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
      return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
  return true;
}
/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
}
bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  return (Index == 0 || Index == ResVT.getVectorNumElements());
}
bool X86TargetLowering::isCheapToSpeculateCttz() const {
  // Speculate cttz only if we can directly use TZCNT.
  return Subtarget->hasBMI();
}

bool X86TargetLowering::isCheapToSpeculateCtlz() const {
  // Speculate ctlz only if we can directly use LZCNT.
  return Subtarget->hasLZCNT();
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, is undef.
static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
    if (Mask[i] >= 0)
      return false;
  return true;
}
/// Return true if Val is undef or if its value falls within the
/// specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// Val is either less than zero (undef) or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  return (Val < 0 || Val == CmpVal);
}
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low+Size), or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
                                       unsigned Pos, unsigned Size, int Low) {
  for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}
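// For instance (illustrative): Mask = <4, -1, 6, 7> with Pos = 0, Size = 4,
// Low = 4 returns true: element 1 is undef and the rest match the sequence
// 4, 5, 6, 7.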
/// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
/// extract that is suitable for instructions that extract 128 or 256 bit
/// vectors.
static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
  assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
  if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
    return false;

  // The index should be aligned on a vecWidth-bit boundary.
  uint64_t Index =
      cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  MVT VT = N->getSimpleValueType(0);
  unsigned ElSize = VT.getVectorElementType().getSizeInBits();
  bool Result = (Index * ElSize) % vecWidth == 0;
  return Result;
}
/// Return true if the specified INSERT_SUBVECTOR
/// operand specifies a subvector insert that is suitable for input to
/// insertion of 128 or 256-bit subvectors.
static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
  assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
  if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
    return false;
  // The index should be aligned on a vecWidth-bit boundary.
  uint64_t Index =
      cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  MVT VT = N->getSimpleValueType(0);
  unsigned ElSize = VT.getVectorElementType().getSizeInBits();
  bool Result = (Index * ElSize) % vecWidth == 0;
  return Result;
}
bool X86::isVINSERT128Index(SDNode *N) {
  return isVINSERTIndex(N, 128);
}

bool X86::isVINSERT256Index(SDNode *N) {
  return isVINSERTIndex(N, 256);
}

bool X86::isVEXTRACT128Index(SDNode *N) {
  return isVEXTRACTIndex(N, 128);
}

bool X86::isVEXTRACT256Index(SDNode *N) {
  return isVEXTRACTIndex(N, 256);
}
static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
  assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
  assert(isa<ConstantSDNode>(N->getOperand(1).getNode()) &&
         "Illegal extract subvector for VEXTRACT");

  uint64_t Index =
      cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  MVT VecVT = N->getOperand(0).getSimpleValueType();
  MVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}
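// Worked example (illustrative): extracting the upper 128-bit half of a
// v8f32 has Index = 4; with ElVT = f32, NumElemsPerChunk = 128 / 32 = 4, so
// the VEXTRACTF128 immediate is 4 / 4 = 1.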
static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
  assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
  assert(isa<ConstantSDNode>(N->getOperand(2).getNode()) &&
         "Illegal insert subvector for VINSERT");

  uint64_t Index =
      cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  MVT VecVT = N->getSimpleValueType(0);
  MVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}
/// Return the appropriate immediate to extract the specified
/// EXTRACT_SUBVECTOR index with VEXTRACTF128 and VEXTRACTI128 instructions.
unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
  return getExtractVEXTRACTImmediate(N, 128);
}

/// Return the appropriate immediate to extract the specified
/// EXTRACT_SUBVECTOR index with VEXTRACTF64x4 and VEXTRACTI64x4 instructions.
unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
  return getExtractVEXTRACTImmediate(N, 256);
}

/// Return the appropriate immediate to insert at the specified
/// INSERT_SUBVECTOR index with VINSERTF128 and VINSERTI128 instructions.
unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
  return getInsertVINSERTImmediate(N, 128);
}

/// Return the appropriate immediate to insert at the specified
/// INSERT_SUBVECTOR index with VINSERTF64x4 and VINSERTI64x4 instructions.
unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
  return getInsertVINSERTImmediate(N, 256);
}
4349 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
4350 bool X86::isZeroNode(SDValue Elt) {
4351 return isNullConstant(Elt) || isNullFPConstant(Elt);
4354 // Build a vector of constants.
4355 // Use an UNDEF node if MaskElt == -1.
4356 // Split 64-bit constants in 32-bit mode.
4357 static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
4359 SDLoc dl, bool IsMask = false) {
4361 SmallVector<SDValue, 32> Ops;
4364 MVT ConstVecVT = VT;
4365 unsigned NumElts = VT.getVectorNumElements();
4366 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
4367 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
4368 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
4372 MVT EltVT = ConstVecVT.getVectorElementType();
4373 for (unsigned i = 0; i < NumElts; ++i) {
4374 bool IsUndef = Values[i] < 0 && IsMask;
4375 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
4376 DAG.getConstant(Values[i], dl, EltVT);
4377 Ops.push_back(OpNode);
4379 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
4380 DAG.getConstant(0, dl, EltVT));
4382 SDValue ConstsNode = DAG.getNode(ISD::BUILD_VECTOR, dl, ConstVecVT, Ops);
4384 ConstsNode = DAG.getBitcast(VT, ConstsNode);
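// A sketch of the 32-bit-mode split handled above: a v2i64 mask is built as
// a v4i32 BUILD_VECTOR, with each mask element emitted as its value in the
// low 32 bits followed by a zero upper half, and the result is then bitcast
// back to the requested v2i64 type here.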
4388 /// Returns a vector of specified type with all zero elements.
4389 static SDValue getZeroVector(MVT VT, const X86Subtarget *Subtarget,
4390 SelectionDAG &DAG, SDLoc dl) {
4391 assert(VT.isVector() && "Expected a vector type");
4393 // Always build SSE zero vectors as <4 x i32> bitcasted
4394 // to their dest type. This ensures they get CSE'd.
4396 if (VT.is128BitVector()) { // SSE
4397 if (Subtarget->hasSSE2()) { // SSE2
4398 SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
4399 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4401 SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
4402 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
4404 } else if (VT.is256BitVector()) { // AVX
4405 if (Subtarget->hasInt256()) { // AVX2
4406 SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
4407 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4408 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
4410 // 256-bit logic and arithmetic instructions in AVX are all
4411 // floating-point, no support for integer ops. Emit fp zeroed vectors.
4412 SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
4413 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4414 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
4416 } else if (VT.is512BitVector()) { // AVX-512
4417 SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
4418 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
4419 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4420 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
4421 } else if (VT.getVectorElementType() == MVT::i1) {
4423 assert((Subtarget->hasBWI() || VT.getVectorNumElements() <= 16)
4424 && "Unexpected vector type");
4425 assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8)
4426 && "Unexpected vector type");
4427 SDValue Cst = DAG.getConstant(0, dl, MVT::i1);
4428 SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst);
4429 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
4431 llvm_unreachable("Unexpected vector type");
4433 return DAG.getBitcast(VT, Vec);
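// Usage sketch (hypothetical call site): getZeroVector(MVT::v4f64, Subtarget,
// DAG, dl) builds a v8i32 zero under AVX2 (or a v8f32 zero otherwise) and
// bitcasts it to v4f64, so all zero vectors of a given width CSE to one node.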
4436 static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
4437 SelectionDAG &DAG, SDLoc dl,
4438 unsigned vectorWidth) {
4439 assert((vectorWidth == 128 || vectorWidth == 256) &&
4440 "Unsupported vector width");
4441 EVT VT = Vec.getValueType();
4442 EVT ElVT = VT.getVectorElementType();
4443 unsigned Factor = VT.getSizeInBits()/vectorWidth;
4444 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
4445 VT.getVectorNumElements()/Factor);
4447 // Extract from UNDEF is UNDEF.
4448 if (Vec.getOpcode() == ISD::UNDEF)
4449 return DAG.getUNDEF(ResultVT);
4451 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
4452 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
4453 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
4455 // This is the index of the first element of the vectorWidth-bit chunk
4456 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
4457 IdxVal &= ~(ElemsPerChunk - 1);
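// E.g. for a v8f32 source and vectorWidth = 128, ElemsPerChunk is 4, so an
// unaligned IdxVal of 6 is rounded down to 4 and the requested element 6
// lands at position 2 of the extracted upper chunk.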
4459 // If the input is a buildvector just emit a smaller one.
4460 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
4461 return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
4462 makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
4464 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
4465 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
4468 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
4469 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
4470 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
4471 /// instructions or a simple subregister reference. Idx is an index in the
4472 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
4473 /// lowering EXTRACT_VECTOR_ELT operations easier.
4474 static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
4475 SelectionDAG &DAG, SDLoc dl) {
4476 assert((Vec.getValueType().is256BitVector() ||
4477 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
4478 return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
4481 /// Generate a DAG to grab 256-bits from a 512-bit vector.
4482 static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
4483 SelectionDAG &DAG, SDLoc dl) {
4484 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
4485 return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
4488 static SDValue InsertSubVector(SDValue Result, SDValue Vec,
4489 unsigned IdxVal, SelectionDAG &DAG,
4490 SDLoc dl, unsigned vectorWidth) {
4491 assert((vectorWidth == 128 || vectorWidth == 256) &&
4492 "Unsupported vector width");
4493 // Inserting UNDEF simply returns Result.
4494 if (Vec.getOpcode() == ISD::UNDEF)
4496 EVT VT = Vec.getValueType();
4497 EVT ElVT = VT.getVectorElementType();
4498 EVT ResultVT = Result.getValueType();
4500 // Insert the relevant vectorWidth bits.
4501 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
4502 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
4504 // This is the index of the first element of the vectorWidth-bit chunk
4505 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
4506 IdxVal &= ~(ElemsPerChunk - 1);
4508 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
4509 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
4512 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
4513 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
4514 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
4515 /// simple superregister reference. Idx is an index in the 128 bits
4516 /// we want. It need not be aligned to a 128-bit boundary. That makes
4517 /// lowering INSERT_VECTOR_ELT operations easier.
4518 static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
4519 SelectionDAG &DAG, SDLoc dl) {
4520 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
4522 // For insertion into the zero index (low half) of a 256-bit vector, it is
4523 // more efficient to generate a blend with immediate instead of an insert*128.
4524 // We are still creating an INSERT_SUBVECTOR below with an undef node to
4525 // extend the subvector to the size of the result vector. Make sure that
4526 // we are not recursing on that node by checking for undef here.
4527 if (IdxVal == 0 && Result.getValueType().is256BitVector() &&
4528 Result.getOpcode() != ISD::UNDEF) {
4529 EVT ResultVT = Result.getValueType();
4530 SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl);
4531 SDValue Undef = DAG.getUNDEF(ResultVT);
4532 SDValue Vec256 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Undef,
4535 // The blend instruction, and therefore its mask, depend on the data type.
4536 MVT ScalarType = ResultVT.getVectorElementType().getSimpleVT();
4537 if (ScalarType.isFloatingPoint()) {
4538 // Choose either vblendps (float) or vblendpd (double).
4539 unsigned ScalarSize = ScalarType.getSizeInBits();
4540 assert((ScalarSize == 64 || ScalarSize == 32) && "Unknown float type");
4541 unsigned MaskVal = (ScalarSize == 64) ? 0x03 : 0x0f;
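// To illustrate the mask values: for v4f64 the low two lanes come from the
// subvector, giving blend mask 0b0011 (0x03); for v8f32 the low four lanes
// do, giving 0b00001111 (0x0f).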
4542 SDValue Mask = DAG.getConstant(MaskVal, dl, MVT::i8);
4543 return DAG.getNode(X86ISD::BLENDI, dl, ResultVT, Result, Vec256, Mask);
4546 const X86Subtarget &Subtarget =
4547 static_cast<const X86Subtarget &>(DAG.getSubtarget());
4549 // AVX2 is needed for 256-bit integer blend support.
4550 // Integers must be cast to 32-bit because there is only vpblendd;
4551 // vpblendw can't be used for this because it has a handicapped mask.
4553 // If we don't have AVX2, then cast to float. Using a wrong domain blend
4554 // is still more efficient than using the wrong domain vinsertf128 that
4555 // will be created by InsertSubVector().
4556 MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
4558 SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8);
4559 Vec256 = DAG.getBitcast(CastVT, Vec256);
4560 Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask);
4561 return DAG.getBitcast(ResultVT, Vec256);
4564 return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
4567 static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
4568 SelectionDAG &DAG, SDLoc dl) {
4569 assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
4570 return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
4573 /// Insert an i1 subvector into an i1 vector.
4574 static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
4577 SDValue Vec = Op.getOperand(0);
4578 SDValue SubVec = Op.getOperand(1);
4579 SDValue Idx = Op.getOperand(2);
4581 if (!isa<ConstantSDNode>(Idx))
4584 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4585 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
4588 MVT OpVT = Op.getSimpleValueType();
4589 MVT SubVecVT = SubVec.getSimpleValueType();
4590 unsigned NumElems = OpVT.getVectorNumElements();
4591 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
4593 assert(IdxVal + SubVecNumElems <= NumElems &&
4594 IdxVal % SubVecVT.getSizeInBits() == 0 &&
4595 "Unexpected index value in INSERT_SUBVECTOR");
4597 // There are 3 possible cases:
4598 // 1. Subvector should be inserted in the lower part (IdxVal == 0)
4599 // 2. Subvector should be inserted in the upper part
4600 // (IdxVal + SubVecNumElems == NumElems)
4601 // 3. Subvector should be inserted in the middle (for example v2i1
4602 // to v16i1, index 2)
4604 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
4605 SDValue Undef = DAG.getUNDEF(OpVT);
4606 SDValue WideSubVec =
4607 DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
4609 return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
4610 DAG.getConstant(IdxVal, dl, MVT::i8));
4612 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
4613 unsigned ShiftLeft = NumElems - SubVecNumElems;
4614 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4615 WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
4616 DAG.getConstant(ShiftLeft, dl, MVT::i8));
4617 return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
4618 DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
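// Worked example of the all-zeros case: inserting v2i1 into a zero v8i1 at
// IdxVal 2 shifts the widened subvector left by 8 - 2 = 6 to clear the bits
// above it, then right by 8 - 2 - 2 = 4, leaving the two bits at positions
// 2 and 3.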
4622 // Zero lower bits of the Vec
4623 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
4624 Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
4625 Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
4626 // Merge them together
4627 return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
4630 // Simple case when we put subvector in the upper part
4631 if (IdxVal + SubVecNumElems == NumElems) {
4632 // Zero upper bits of the Vec
4633 WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
4634 DAG.getConstant(IdxVal, dl, MVT::i8));
4635 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
4636 Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
4637 Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
4638 return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
4640 // Subvector should be inserted in the middle - use shuffle
4641 SmallVector<int, 64> Mask;
4642 for (unsigned i = 0; i < NumElems; ++i)
4643 Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
4645 return DAG.getVectorShuffle(OpVT, dl, WideSubVec, Vec, Mask);
4648 /// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
4649 /// instructions. This is used because creating CONCAT_VECTORS nodes of
4650 /// BUILD_VECTORs returns a larger BUILD_VECTOR while we're trying to lower
4651 /// large BUILD_VECTORs.
4652 static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
4653 unsigned NumElems, SelectionDAG &DAG,
4655 SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
4656 return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
4659 static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
4660 unsigned NumElems, SelectionDAG &DAG,
4662 SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
4663 return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
4666 /// Returns a vector of specified type with all bits set.
4667 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
4668 /// no AVX2 support, use two <4 x i32> inserted into an <8 x i32> appropriately.
4669 /// Then bitcast to their original type, ensuring they get CSE'd.
4670 static SDValue getOnesVector(EVT VT, const X86Subtarget *Subtarget,
4671 SelectionDAG &DAG, SDLoc dl) {
4672 assert(VT.isVector() && "Expected a vector type");
4674 SDValue Cst = DAG.getConstant(~0U, dl, MVT::i32);
4676 if (VT.is512BitVector()) {
4677 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
4678 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4679 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
4680 } else if (VT.is256BitVector()) {
4681 if (Subtarget->hasInt256()) { // AVX2
4682 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4683 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
4685 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4686 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
4688 } else if (VT.is128BitVector()) {
4689 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4691 llvm_unreachable("Unexpected vector type");
4693 return DAG.getBitcast(VT, Vec);
4696 /// Returns a vector_shuffle node for an unpackl operation.
4697 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
4699 unsigned NumElems = VT.getVectorNumElements();
4700 SmallVector<int, 8> Mask;
4701 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
4703 Mask.push_back(i + NumElems);
4705 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4708 /// Returns a vector_shuffle node for an unpackh operation.
4709 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
4711 unsigned NumElems = VT.getVectorNumElements();
4712 SmallVector<int, 8> Mask;
4713 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
4714 Mask.push_back(i + Half);
4715 Mask.push_back(i + NumElems + Half);
4717 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4720 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
4721 /// This produces a shuffle where the low element of V2 is swizzled into the
4722 /// zero/undef vector, landing at element Idx.
4723 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4724 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
4726 const X86Subtarget *Subtarget,
4727 SelectionDAG &DAG) {
4728 MVT VT = V2.getSimpleValueType();
4730 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4731 unsigned NumElems = VT.getVectorNumElements();
4732 SmallVector<int, 16> MaskVec;
4733 for (unsigned i = 0; i != NumElems; ++i)
4734 // If this is the insertion idx, put the low elt of V2 here.
4735 MaskVec.push_back(i == Idx ? NumElems : i);
4736 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
4739 /// Calculates the shuffle mask corresponding to the target-specific opcode.
4740 /// Returns true if the Mask could be calculated. Sets IsUnary to true if only
4741 /// uses one source. Note that this will set IsUnary for shuffles which use a
4742 /// single input multiple times, and in those cases it will
4743 /// adjust the mask to only have indices within that single input.
4744 /// FIXME: Add support for Decode*Mask functions that return SM_SentinelZero.
4745 static bool getTargetShuffleMask(SDNode *N, MVT VT,
4746 SmallVectorImpl<int> &Mask, bool &IsUnary) {
4747 unsigned NumElems = VT.getVectorNumElements();
4751 bool IsFakeUnary = false;
4752 switch(N->getOpcode()) {
4753 case X86ISD::BLENDI:
4754 ImmN = N->getOperand(N->getNumOperands()-1);
4755 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4758 ImmN = N->getOperand(N->getNumOperands()-1);
4759 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4760 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4762 case X86ISD::UNPCKH:
4763 DecodeUNPCKHMask(VT, Mask);
4764 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4766 case X86ISD::UNPCKL:
4767 DecodeUNPCKLMask(VT, Mask);
4768 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4770 case X86ISD::MOVHLPS:
4771 DecodeMOVHLPSMask(NumElems, Mask);
4772 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4774 case X86ISD::MOVLHPS:
4775 DecodeMOVLHPSMask(NumElems, Mask);
4776 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4778 case X86ISD::PALIGNR:
4779 ImmN = N->getOperand(N->getNumOperands()-1);
4780 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4782 case X86ISD::PSHUFD:
4783 case X86ISD::VPERMILPI:
4784 ImmN = N->getOperand(N->getNumOperands()-1);
4785 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4788 case X86ISD::PSHUFHW:
4789 ImmN = N->getOperand(N->getNumOperands()-1);
4790 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4793 case X86ISD::PSHUFLW:
4794 ImmN = N->getOperand(N->getNumOperands()-1);
4795 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4798 case X86ISD::PSHUFB: {
4800 SDValue MaskNode = N->getOperand(1);
4801 while (MaskNode->getOpcode() == ISD::BITCAST)
4802 MaskNode = MaskNode->getOperand(0);
4804 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
4805 // If we have a build-vector, then things are easy.
4806 MVT VT = MaskNode.getSimpleValueType();
4807 assert(VT.isVector() &&
4808 "Can't produce a non-vector with a build_vector!");
4809 if (!VT.isInteger())
4812 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
4814 SmallVector<uint64_t, 32> RawMask;
4815 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
4816 SDValue Op = MaskNode->getOperand(i);
4817 if (Op->getOpcode() == ISD::UNDEF) {
4818 RawMask.push_back((uint64_t)SM_SentinelUndef);
4821 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
4824 APInt MaskElement = CN->getAPIntValue();
4826 // We now have to decode the element which could be any integer size and
4827 // extract each byte of it.
4828 for (int j = 0; j < NumBytesPerElement; ++j) {
4829 // Note that this is x86 and so always little endian: the low byte is
4830 // the first byte of the mask.
4831 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
4832 MaskElement = MaskElement.lshr(8);
4835 DecodePSHUFBMask(RawMask, Mask);
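// For instance, a v4i32 PSHUFB mask whose first element is 0x03020100
// decodes (little endian) into the raw bytes 0x00, 0x01, 0x02, 0x03, i.e.
// an identity shuffle of the first four bytes.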
4839 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
4843 SDValue Ptr = MaskLoad->getBasePtr();
4844 if (Ptr->getOpcode() == X86ISD::Wrapper ||
4845 Ptr->getOpcode() == X86ISD::WrapperRIP)
4846 Ptr = Ptr->getOperand(0);
4848 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
4849 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
4852 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
4853 DecodePSHUFBMask(C, Mask);
4861 case X86ISD::VPERMI:
4862 ImmN = N->getOperand(N->getNumOperands()-1);
4863 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4868 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
4870 case X86ISD::VPERM2X128:
4871 ImmN = N->getOperand(N->getNumOperands()-1);
4872 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4873 if (Mask.empty()) return false;
4874 // The mask only contains a negative index if an element is zero.
4875 if (std::any_of(Mask.begin(), Mask.end(),
4876 [](int M){ return M == SM_SentinelZero; }))
4879 case X86ISD::MOVSLDUP:
4880 DecodeMOVSLDUPMask(VT, Mask);
4883 case X86ISD::MOVSHDUP:
4884 DecodeMOVSHDUPMask(VT, Mask);
4887 case X86ISD::MOVDDUP:
4888 DecodeMOVDDUPMask(VT, Mask);
4891 case X86ISD::MOVLHPD:
4892 case X86ISD::MOVLPD:
4893 case X86ISD::MOVLPS:
4894 // Not yet implemented
4896 case X86ISD::VPERMV: {
4898 SDValue MaskNode = N->getOperand(0);
4899 while (MaskNode->getOpcode() == ISD::BITCAST)
4900 MaskNode = MaskNode->getOperand(0);
4902 unsigned MaskLoBits = Log2_64(VT.getVectorNumElements());
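// Only the low Log2(NumElts) bits of each mask element are significant; for
// a v8f32 VPERMV, for example, MaskLoBits is 3 and indices are taken mod 8,
// matching the instruction's behavior of ignoring the higher bits.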
4903 SmallVector<uint64_t, 32> RawMask;
4904 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
4905 // If we have a build-vector, then things are easy.
4906 assert(MaskNode.getSimpleValueType().isInteger() &&
4907 MaskNode.getSimpleValueType().getVectorNumElements() ==
4908 VT.getVectorNumElements());
4910 for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
4911 SDValue Op = MaskNode->getOperand(i);
4912 if (Op->getOpcode() == ISD::UNDEF)
4913 RawMask.push_back((uint64_t)SM_SentinelUndef);
4914 else if (isa<ConstantSDNode>(Op)) {
4915 APInt MaskElement = cast<ConstantSDNode>(Op)->getAPIntValue();
4916 RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
4920 DecodeVPERMVMask(RawMask, Mask);
4923 if (MaskNode->getOpcode() == X86ISD::VBROADCAST) {
4924 unsigned NumEltsInMask = MaskNode->getNumOperands();
4925 MaskNode = MaskNode->getOperand(0);
4926 if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode)) {
4927 APInt MaskEltValue = CN->getAPIntValue();
4928 for (unsigned i = 0; i < NumEltsInMask; ++i)
4929 RawMask.push_back(MaskEltValue.getLoBits(MaskLoBits).getZExtValue());
4930 DecodeVPERMVMask(RawMask, Mask);
4933 // It may be a scalar load
4936 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
4940 SDValue Ptr = MaskLoad->getBasePtr();
4941 if (Ptr->getOpcode() == X86ISD::Wrapper ||
4942 Ptr->getOpcode() == X86ISD::WrapperRIP)
4943 Ptr = Ptr->getOperand(0);
4945 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
4946 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
4949 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
4950 DecodeVPERMVMask(C, VT, Mask);
4957 case X86ISD::VPERMV3: {
4959 SDValue MaskNode = N->getOperand(1);
4960 while (MaskNode->getOpcode() == ISD::BITCAST)
4961 MaskNode = MaskNode->getOperand(0);
4963 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
4964 // If we have a build-vector, then things are easy.
4965 assert(MaskNode.getSimpleValueType().isInteger() &&
4966 MaskNode.getSimpleValueType().getVectorNumElements() ==
4967 VT.getVectorNumElements());
4969 SmallVector<uint64_t, 32> RawMask;
4970 unsigned MaskLoBits = Log2_64(VT.getVectorNumElements()*2);
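// VPERMV3 selects from the concatenation of two source vectors, so one extra
// index bit is needed: e.g. for v8i32 the mask uses Log2(16) = 4 bits, with
// values 8..15 addressing the second source.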
4972 for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
4973 SDValue Op = MaskNode->getOperand(i);
4974 if (Op->getOpcode() == ISD::UNDEF)
4975 RawMask.push_back((uint64_t)SM_SentinelUndef);
4977 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
4980 APInt MaskElement = CN->getAPIntValue();
4981 RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
4984 DecodeVPERMV3Mask(RawMask, Mask);
4988 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
4992 SDValue Ptr = MaskLoad->getBasePtr();
4993 if (Ptr->getOpcode() == X86ISD::Wrapper ||
4994 Ptr->getOpcode() == X86ISD::WrapperRIP)
4995 Ptr = Ptr->getOperand(0);
4997 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
4998 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5001 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5002 DecodeVPERMV3Mask(C, VT, Mask);
5009 default: llvm_unreachable("unknown target shuffle node");
5012 // If we have a fake unary shuffle, the shuffle mask is spread across two
5013 // inputs that are actually the same node. Re-map the mask to always point
5014 // into the first input.
5017 if (M >= (int)Mask.size())
5023 /// Returns the scalar element that will make up the ith
5024 /// element of the result of the vector shuffle.
5025 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5028 return SDValue(); // Limit search depth.
5030 SDValue V = SDValue(N, 0);
5031 EVT VT = V.getValueType();
5032 unsigned Opcode = V.getOpcode();
5034 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5035 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5036 int Elt = SV->getMaskElt(Index);
5039 return DAG.getUNDEF(VT.getVectorElementType());
5041 unsigned NumElems = VT.getVectorNumElements();
5042 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5043 : SV->getOperand(1);
5044 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5047 // Recurse into target specific vector shuffles to find scalars.
5048 if (isTargetShuffle(Opcode)) {
5049 MVT ShufVT = V.getSimpleValueType();
5050 unsigned NumElems = ShufVT.getVectorNumElements();
5051 SmallVector<int, 16> ShuffleMask;
5054 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5057 int Elt = ShuffleMask[Index];
5059 return DAG.getUNDEF(ShufVT.getVectorElementType());
5061 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5063 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5067 // Actual nodes that may contain scalar elements
5068 if (Opcode == ISD::BITCAST) {
5069 V = V.getOperand(0);
5070 EVT SrcVT = V.getValueType();
5071 unsigned NumElems = VT.getVectorNumElements();
5073 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5077 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5078 return (Index == 0) ? V.getOperand(0)
5079 : DAG.getUNDEF(VT.getVectorElementType());
5081 if (V.getOpcode() == ISD::BUILD_VECTOR)
5082 return V.getOperand(Index);
5087 /// Custom lower build_vector of v16i8.
5088 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5089 unsigned NumNonZero, unsigned NumZero,
5091 const X86Subtarget* Subtarget,
5092 const TargetLowering &TLI) {
5100 // SSE4.1 - use PINSRB to insert each byte directly.
5101 if (Subtarget->hasSSE41()) {
5102 for (unsigned i = 0; i < 16; ++i) {
5103 bool isNonZero = (NonZeros & (1 << i)) != 0;
5107 V = getZeroVector(MVT::v16i8, Subtarget, DAG, dl);
5109 V = DAG.getUNDEF(MVT::v16i8);
5112 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5113 MVT::v16i8, V, Op.getOperand(i),
5114 DAG.getIntPtrConstant(i, dl));
5121 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
5122 for (unsigned i = 0; i < 16; ++i) {
5123 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5124 if (ThisIsNonZero && First) {
5126 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5128 V = DAG.getUNDEF(MVT::v8i16);
5133 SDValue ThisElt, LastElt;
5134 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5135 if (LastIsNonZero) {
5136 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5137 MVT::i16, Op.getOperand(i-1));
5139 if (ThisIsNonZero) {
5140 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5141 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5142 ThisElt, DAG.getConstant(8, dl, MVT::i8));
5144 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5148 if (ThisElt.getNode())
5149 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5150 DAG.getIntPtrConstant(i/2, dl));
5154 return DAG.getBitcast(MVT::v16i8, V);
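// Sketch of the byte pairing above: bytes 2k and 2k+1 are zero-extended to
// i16 and combined as (byte[2k+1] << 8) | byte[2k], then inserted into lane
// k of a v8i16 with PINSRW before the final bitcast back to v16i8.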
5157 /// Custom lower build_vector of v8i16.
5158 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5159 unsigned NumNonZero, unsigned NumZero,
5161 const X86Subtarget* Subtarget,
5162 const TargetLowering &TLI) {
5169 for (unsigned i = 0; i < 8; ++i) {
5170 bool isNonZero = (NonZeros & (1 << i)) != 0;
5174 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5176 V = DAG.getUNDEF(MVT::v8i16);
5179 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5180 MVT::v8i16, V, Op.getOperand(i),
5181 DAG.getIntPtrConstant(i, dl));
5188 /// Custom lower build_vector of v4i32 or v4f32.
5189 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5190 const X86Subtarget *Subtarget,
5191 const TargetLowering &TLI) {
5192 // Find all zeroable elements.
5193 std::bitset<4> Zeroable;
5194 for (int i=0; i < 4; ++i) {
5195 SDValue Elt = Op->getOperand(i);
5196 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5198 assert(Zeroable.size() - Zeroable.count() > 1 &&
5199 "We expect at least two non-zero elements!");
5201 // We only know how to deal with build_vector nodes where elements are either
5202 // zeroable or extract_vector_elt with constant index.
5203 SDValue FirstNonZero;
5204 unsigned FirstNonZeroIdx;
5205 for (unsigned i=0; i < 4; ++i) {
5208 SDValue Elt = Op->getOperand(i);
5209 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5210 !isa<ConstantSDNode>(Elt.getOperand(1)))
5212 // Make sure that this node is extracting from a 128-bit vector.
5213 MVT VT = Elt.getOperand(0).getSimpleValueType();
5214 if (!VT.is128BitVector())
5216 if (!FirstNonZero.getNode()) {
5218 FirstNonZeroIdx = i;
5222 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5223 SDValue V1 = FirstNonZero.getOperand(0);
5224 MVT VT = V1.getSimpleValueType();
5226 // See if this build_vector can be lowered as a blend with zero.
5228 unsigned EltMaskIdx, EltIdx;
5230 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5231 if (Zeroable[EltIdx]) {
5232 // The zero vector will be on the right hand side.
5233 Mask[EltIdx] = EltIdx+4;
5237 Elt = Op->getOperand(EltIdx);
5238 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
5239 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5240 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5242 Mask[EltIdx] = EltIdx;
5246 // Let the shuffle legalizer deal with blend operations.
5247 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5248 if (V1.getSimpleValueType() != VT)
5249 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5250 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5253 // See if we can lower this build_vector to an INSERTPS.
5254 if (!Subtarget->hasSSE41())
5257 SDValue V2 = Elt.getOperand(0);
5258 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5261 bool CanFold = true;
5262 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5266 SDValue Current = Op->getOperand(i);
5267 SDValue SrcVector = Current->getOperand(0);
5270 CanFold = SrcVector == V1 &&
5271 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5277 assert(V1.getNode() && "Expected at least two non-zero elements!");
5278 if (V1.getSimpleValueType() != MVT::v4f32)
5279 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5280 if (V2.getSimpleValueType() != MVT::v4f32)
5281 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5283 // Ok, we can emit an INSERTPS instruction.
5284 unsigned ZMask = Zeroable.to_ulong();
5286 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5287 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
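// INSERTPS immediate layout, for reference: bits [7:6] select the source
// element of V2, bits [5:4] the destination lane in V1, and bits [3:0] form
// the zero mask; e.g. 0x30 copies V2[0] into lane 3 with nothing zeroed.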
5289 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
5290 DAG.getIntPtrConstant(InsertPSMask, DL));
5291 return DAG.getBitcast(VT, Result);
5294 /// Return a vector logical shift node.
5295 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5296 unsigned NumBits, SelectionDAG &DAG,
5297 const TargetLowering &TLI, SDLoc dl) {
5298 assert(VT.is128BitVector() && "Unknown type for VShift");
5299 MVT ShVT = MVT::v2i64;
5300 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5301 SrcOp = DAG.getBitcast(ShVT, SrcOp);
5302 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT);
5303 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5304 SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, ScalarShiftTy);
5305 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
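// Example: getVShift(true, MVT::v4i32, Src, 32, ...) bitcasts Src to v2i64
// and emits VSHLDQ (PSLLDQ) with a byte count of 32 / 8 = 4, in effect
// moving the elements up by one 32-bit lane.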
5309 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5311 // Check if the scalar load can be widened into a vector load. And if
5312 // the address is "base + cst" see if the cst can be "absorbed" into
5313 // the shuffle mask.
5314 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5315 SDValue Ptr = LD->getBasePtr();
5316 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5318 EVT PVT = LD->getValueType(0);
5319 if (PVT != MVT::i32 && PVT != MVT::f32)
5324 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5325 FI = FINode->getIndex();
5327 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5328 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5329 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5330 Offset = Ptr.getConstantOperandVal(1);
5331 Ptr = Ptr.getOperand(0);
5336 // FIXME: 256-bit vector instructions don't require a strict alignment,
5337 // improve this code to support it better.
5338 unsigned RequiredAlign = VT.getSizeInBits()/8;
5339 SDValue Chain = LD->getChain();
5340 // Make sure the stack object alignment is at least 16 or 32.
5341 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5342 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5343 if (MFI->isFixedObjectIndex(FI)) {
5344 // Can't change the alignment. FIXME: It's possible to compute
5345 // the exact stack offset and reference FI + adjust offset instead.
5346 // If someone *really* cares about this, that's the way to implement it.
5349 MFI->setObjectAlignment(FI, RequiredAlign);
5353 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5354 // Ptr + (Offset & ~(RequiredAlign - 1)).
5357 if ((Offset % RequiredAlign) & 3)
5359 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
5362 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
5363 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
5366 int EltNo = (Offset - StartOffset) >> 2;
5367 unsigned NumElems = VT.getVectorNumElements();
5369 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5370 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5371 LD->getPointerInfo().getWithOffset(StartOffset),
5372 false, false, false, 0);
5374 SmallVector<int, 8> Mask(NumElems, EltNo);
5376 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
5382 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
5383 /// elements can be replaced by a single large load which has the same value as
5384 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
5386 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
5388 /// FIXME: we'd also like to handle the case where the last elements are zero
5389 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
5390 /// There's even a handy isZeroNode for that purpose.
5391 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
5392 SDLoc &DL, SelectionDAG &DAG,
5393 bool isAfterLegalize) {
5394 unsigned NumElems = Elts.size();
5396 LoadSDNode *LDBase = nullptr;
5397 unsigned LastLoadedElt = -1U;
5399 // For each element in the initializer, see if we've found a load or an undef.
5400 // If we don't find an initial load element, or later load elements are
5401 // non-consecutive, bail out.
5402 for (unsigned i = 0; i < NumElems; ++i) {
5403 SDValue Elt = Elts[i];
5404 // Look through a bitcast.
5405 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
5406 Elt = Elt.getOperand(0);
5407 if (!Elt.getNode() ||
5408 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
5411 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
5413 LDBase = cast<LoadSDNode>(Elt.getNode());
5417 if (Elt.getOpcode() == ISD::UNDEF)
5420 LoadSDNode *LD = cast<LoadSDNode>(Elt);
5421 EVT LdVT = Elt.getValueType();
5422 // Each loaded element must be the correct fractional portion of the
5423 // requested vector load.
5424 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
5426 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
5431 // If we have found an entire vector of loads and undefs, then return a large
5432 // load of the entire vector width starting at the base pointer. If we found
5433 // consecutive loads for the low half, generate a vzext_load node.
5434 if (LastLoadedElt == NumElems - 1) {
5435 assert(LDBase && "Did not find base load for merging consecutive loads");
5436 EVT EltVT = LDBase->getValueType(0);
5437 // Ensure that the input vector size for the merged loads matches the
5438 // cumulative size of the input elements.
5439 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
5442 if (isAfterLegalize &&
5443 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
5446 SDValue NewLd = SDValue();
5448 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
5449 LDBase->getPointerInfo(), LDBase->isVolatile(),
5450 LDBase->isNonTemporal(), LDBase->isInvariant(),
5451 LDBase->getAlignment());
5453 if (LDBase->hasAnyUseOfValue(1)) {
5454 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
5456 SDValue(NewLd.getNode(), 1));
5457 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5458 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5459 SDValue(NewLd.getNode(), 1));
5465 // TODO: The code below fires only for loading the low v2i32 / v2f32
5466 // of a v4i32 / v4f32. It's probably worth generalizing.
5467 EVT EltVT = VT.getVectorElementType();
5468 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
5469 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
5470 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
5471 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
5473 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
5474 LDBase->getPointerInfo(),
5475 LDBase->getAlignment(),
5476 false/*isVolatile*/, true/*ReadMem*/,
5479 // Make sure the newly-created LOAD is in the same position as LDBase in
5480 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
5481 // update uses of LDBase's output chain to use the TokenFactor.
5482 if (LDBase->hasAnyUseOfValue(1)) {
5483 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
5484 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
5485 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5486 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5487 SDValue(ResNode.getNode(), 1));
5490 return DAG.getBitcast(VT, ResNode);
5495 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
5496 /// to generate a splat value for the following cases:
5497 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
5498 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
5499 /// a scalar load, or a constant.
5500 /// The VBROADCAST node is returned when a pattern is found,
5501 /// or SDValue() otherwise.
5502 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
5503 SelectionDAG &DAG) {
5504 // VBROADCAST requires AVX.
5505 // TODO: Splats could be generated for non-AVX CPUs using SSE
5506 // instructions, but there's less potential gain for only 128-bit vectors.
5507 if (!Subtarget->hasAVX())
5510 MVT VT = Op.getSimpleValueType();
5513 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5514 "Unsupported vector type for broadcast.");
5519 switch (Op.getOpcode()) {
5521 // Unknown pattern found.
5524 case ISD::BUILD_VECTOR: {
5525 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
5526 BitVector UndefElements;
5527 SDValue Splat = BVOp->getSplatValue(&UndefElements);
5529 // We need a splat of a single value to use broadcast, and it doesn't
5530 // make any sense if the value is only in one element of the vector.
5531 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
5535 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5536 Ld.getOpcode() == ISD::ConstantFP);
5538 // Make sure that all of the users of a non-constant load are from the
5539 // BUILD_VECTOR node.
5540 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
5545 case ISD::VECTOR_SHUFFLE: {
5546 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5548 // Shuffles must have a splat mask where the first element is being splatted.
5550 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
5553 SDValue Sc = Op.getOperand(0);
5554 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
5555 Sc.getOpcode() != ISD::BUILD_VECTOR) {
5557 if (!Subtarget->hasInt256())
5560 // Use the register form of the broadcast instruction available on AVX2.
5561 if (VT.getSizeInBits() >= 256)
5562 Sc = Extract128BitVector(Sc, 0, DAG, dl);
5563 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
5566 Ld = Sc.getOperand(0);
5567 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5568 Ld.getOpcode() == ISD::ConstantFP);
5570 // The scalar_to_vector node and the suspected
5571 // load node must have exactly one user.
5572 // Constants may have multiple users.
5574 // AVX-512 has register version of the broadcast
5575 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
5576 Ld.getValueType().getSizeInBits() >= 32;
5577 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
5584 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
5585 bool IsGE256 = (VT.getSizeInBits() >= 256);
5587 // When optimizing for size, generate up to 5 extra bytes for a broadcast
5588 // instruction to save 8 or more bytes of constant pool data.
5589 // TODO: If multiple splats are generated to load the same constant,
5590 // it may be detrimental to overall size. There needs to be a way to detect
5591 // that condition to know if this is truly a size win.
5592 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
5594 // Handle broadcasting a single constant scalar from the constant pool
5596 // On Sandybridge (no AVX2), it is still better to load a constant vector
5597 // from the constant pool and not to broadcast it from a scalar.
5598 // But override that restriction when optimizing for size.
5599 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
5600 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
5601 EVT CVT = Ld.getValueType();
5602 assert(!CVT.isVector() && "Must not broadcast a vector type");
5604 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
5605 // For size optimization, also splat v2f64 and v2i64, and for size opt
5606 // with AVX2, also splat i8 and i16.
5607 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
5608 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
5609 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
5610 const Constant *C = nullptr;
5611 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
5612 C = CI->getConstantIntValue();
5613 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
5614 C = CF->getConstantFPValue();
5616 assert(C && "Invalid constant type");
5618 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5620 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
5621 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
5623 CVT, dl, DAG.getEntryNode(), CP,
5624 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
5625 false, false, Alignment);
5627 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5631 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
5633 // Handle AVX2 in-register broadcasts.
5634 if (!IsLoad && Subtarget->hasInt256() &&
5635 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
5636 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5638 // The scalar source must be a normal load.
5642 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
5643 (Subtarget->hasVLX() && ScalarSize == 64))
5644 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5646 // The integer check is needed for the 64-bit into 128-bit case, so that it
5647 // doesn't match double: there is no vbroadcastsd xmm instruction.
5648 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
5649 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
5650 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5653 // Unsupported broadcast.
5657 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
5658 /// underlying vector and index.
5660 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
5662 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
5664 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
5665 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
5668 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
5670 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
5672 // (extract_vector_elt (vector_shuffle<2,u,u,u>
5673 // (extract_subvector (v8f32 %vreg0), Constant<4>),
5676 // In this case the vector is the extract_subvector expression and the index
5677 // is 2, as specified by the shuffle.
5678 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
5679 SDValue ShuffleVec = SVOp->getOperand(0);
5680 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
5681 assert(ShuffleVecVT.getVectorElementType() ==
5682 ExtractedFromVec.getSimpleValueType().getVectorElementType());
5684 int ShuffleIdx = SVOp->getMaskElt(Idx);
5685 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
5686 ExtractedFromVec = ShuffleVec;
5692 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
5693 MVT VT = Op.getSimpleValueType();
5695 // Skip if insert_vec_elt is not supported.
5696 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5697 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
5701 unsigned NumElems = Op.getNumOperands();
5705 SmallVector<unsigned, 4> InsertIndices;
5706 SmallVector<int, 8> Mask(NumElems, -1);
5708 for (unsigned i = 0; i != NumElems; ++i) {
5709 unsigned Opc = Op.getOperand(i).getOpcode();
5711 if (Opc == ISD::UNDEF)
5714 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
5715 // Quit if more than one element needs inserting.
5716 if (InsertIndices.size() > 1)
5719 InsertIndices.push_back(i);
5723 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
5724 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
5725 // Quit if non-constant index.
5726 if (!isa<ConstantSDNode>(ExtIdx))
5728 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
5730 // Quit if extracted from vector of different type.
5731 if (ExtractedFromVec.getValueType() != VT)
5734 if (!VecIn1.getNode())
5735 VecIn1 = ExtractedFromVec;
5736 else if (VecIn1 != ExtractedFromVec) {
5737 if (!VecIn2.getNode())
5738 VecIn2 = ExtractedFromVec;
5739 else if (VecIn2 != ExtractedFromVec)
5740 // Quit if more than 2 vectors to shuffle
5744 if (ExtractedFromVec == VecIn1)
5746 else if (ExtractedFromVec == VecIn2)
5747 Mask[i] = Idx + NumElems;
5750 if (!VecIn1.getNode())
5753 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
5754 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
5755 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
5756 unsigned Idx = InsertIndices[i];
5757 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
5758 DAG.getIntPtrConstant(Idx, DL));
5764 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
5765 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
5766 Op.getScalarValueSizeInBits() == 1 &&
5767 "Can not convert non-constant vector");
5768 uint64_t Immediate = 0;
5769 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
5770 SDValue In = Op.getOperand(idx);
5771 if (In.getOpcode() != ISD::UNDEF)
5772 Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
5776 MVT::getIntegerVT(std::max((int)Op.getValueType().getSizeInBits(), 8));
5777 return DAG.getConstant(Immediate, dl, VT);
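// E.g. a constant v4i1 <1, 0, 1, 1> packs into the immediate 0b1101 = 13,
// returned here as an i8 since the integer type is padded to at least 8 bits.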
5779 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
5781 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
5783 MVT VT = Op.getSimpleValueType();
5784 assert((VT.getVectorElementType() == MVT::i1) &&
5785 "Unexpected type in LowerBUILD_VECTORvXi1!");
5788 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
5789 SDValue Cst = DAG.getTargetConstant(0, dl, MVT::i1);
5790 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5791 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5794 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
5795 SDValue Cst = DAG.getTargetConstant(1, dl, MVT::i1);
5796 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5797 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5800 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
5801 SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
5802 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
5803 return DAG.getBitcast(VT, Imm);
5804 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
5805 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
5806 DAG.getIntPtrConstant(0, dl));
5809 // Vector has one or more non-const elements
5810 uint64_t Immediate = 0;
5811 SmallVector<unsigned, 16> NonConstIdx;
5812 bool IsSplat = true;
5813 bool HasConstElts = false;
5815 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
5816 SDValue In = Op.getOperand(idx);
5817 if (In.getOpcode() == ISD::UNDEF)
5819 if (!isa<ConstantSDNode>(In))
5820 NonConstIdx.push_back(idx);
5822 Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
5823 HasConstElts = true;
5827 else if (In != Op.getOperand(SplatIdx))
5831 // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
5833 return DAG.getNode(ISD::SELECT, dl, VT, Op.getOperand(SplatIdx),
5834 DAG.getConstant(1, dl, VT),
5835 DAG.getConstant(0, dl, VT));
5837 // insert elements one by one
5841 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
5842 Imm = DAG.getConstant(Immediate, dl, ImmVT);
5844 else if (HasConstElts)
5845 Imm = DAG.getConstant(0, dl, VT);
5847 Imm = DAG.getUNDEF(VT);
5848 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
5849 DstVec = DAG.getBitcast(VT, Imm);
5851 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
5852 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
5853 DAG.getIntPtrConstant(0, dl));
5856 for (unsigned i = 0; i < NonConstIdx.size(); ++i) {
5857 unsigned InsertIdx = NonConstIdx[i];
5858 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
5859 Op.getOperand(InsertIdx),
5860 DAG.getIntPtrConstant(InsertIdx, dl));
5865 /// \brief Return true if \p N implements a horizontal binop and put the
5866 /// operands for the horizontal binop into V0 and V1.
5868 /// This is a helper function of LowerToHorizontalOp().
5869 /// This function checks whether the input build_vector \p N implements a
5870 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
5871 /// operation to match.
5872 /// For example, if \p Opcode is equal to ISD::ADD, then this function
5873 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
5874 /// is equal to ISD::SUB, then this function checks if this is a horizontal
5877 /// This function only analyzes elements of \p N whose indices are
5878 /// in range [BaseIdx, LastIdx).
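///
/// Illustrative example: a v4f32 build_vector of
///   (fadd (extract A,0),(extract A,1)), (fadd (extract A,2),(extract A,3)),
///   (fadd (extract B,0),(extract B,1)), (fadd (extract B,2),(extract B,3))
/// matches with V0 = A and V1 = B, i.e. an HADDPS pattern.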
5879 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
5881 unsigned BaseIdx, unsigned LastIdx,
5882 SDValue &V0, SDValue &V1) {
5883 EVT VT = N->getValueType(0);
5885 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
5886 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
5887 "Invalid Vector in input!");
5889 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
5890 bool CanFold = true;
5891 unsigned ExpectedVExtractIdx = BaseIdx;
5892 unsigned NumElts = LastIdx - BaseIdx;
5893 V0 = DAG.getUNDEF(VT);
5894 V1 = DAG.getUNDEF(VT);
5896 // Check if N implements a horizontal binop.
5897 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
5898 SDValue Op = N->getOperand(i + BaseIdx);
5901 if (Op->getOpcode() == ISD::UNDEF) {
5902 // Update the expected vector extract index.
5903 if (i * 2 == NumElts)
5904 ExpectedVExtractIdx = BaseIdx;
5905 ExpectedVExtractIdx += 2;
5909 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
5914 SDValue Op0 = Op.getOperand(0);
5915 SDValue Op1 = Op.getOperand(1);
5917 // Try to match the following pattern:
5918 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
5919 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5920 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5921 Op0.getOperand(0) == Op1.getOperand(0) &&
5922 isa<ConstantSDNode>(Op0.getOperand(1)) &&
5923 isa<ConstantSDNode>(Op1.getOperand(1)));
5927 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
5928 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
5930 if (i * 2 < NumElts) {
5931 if (V0.getOpcode() == ISD::UNDEF) {
5932 V0 = Op0.getOperand(0);
5933 if (V0.getValueType() != VT)
5937 if (V1.getOpcode() == ISD::UNDEF) {
5938 V1 = Op0.getOperand(0);
5939 if (V1.getValueType() != VT)
5942 if (i * 2 == NumElts)
5943 ExpectedVExtractIdx = BaseIdx;
5946 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
5947 if (I0 == ExpectedVExtractIdx)
5948 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
5949 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
5950 // Try to match the following dag sequence:
5951 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
5952 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
5956 ExpectedVExtractIdx += 2;
5962 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
5963 /// a concat_vector.
5965 /// This is a helper function of LowerToHorizontalOp().
5966 /// This function expects two 256-bit vectors called V0 and V1.
5967 /// At first, each vector is split into two separate 128-bit vectors.
5968 /// Then, the resulting 128-bit vectors are used to implement two
5969 /// horizontal binary operations.
5971 /// The kind of horizontal binary operation is defined by \p X86Opcode.
5973 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
5974 /// the two new horizontal binop.
5975 /// When Mode is set, the first horizontal binop dag node would take as input
5976 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
5977 /// horizontal binop dag node would take as input the lower 128-bit of V1
5978 /// and the upper 128-bit of V1.
5980 /// HADD V0_LO, V0_HI
5981 /// HADD V1_LO, V1_HI
5983 /// Otherwise, the first horizontal binop dag node takes as input the lower
5984 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
5985 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
5987 /// HADD V0_LO, V1_LO
5988 /// HADD V0_HI, V1_HI
5990 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
5991 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
5992 /// the upper 128-bits of the result.
5993 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
5994 SDLoc DL, SelectionDAG &DAG,
5995 unsigned X86Opcode, bool Mode,
5996 bool isUndefLO, bool isUndefHI) {
5997 EVT VT = V0.getValueType();
5998 assert(VT.is256BitVector() && VT == V1.getValueType() &&
5999 "Invalid nodes in input!");
6001 unsigned NumElts = VT.getVectorNumElements();
6002 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6003 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6004 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6005 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6006 EVT NewVT = V0_LO.getValueType();
6008 SDValue LO = DAG.getUNDEF(NewVT);
6009 SDValue HI = DAG.getUNDEF(NewVT);
6012 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6013 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6014 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6015 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6016 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6018 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6019 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6020 V1_LO->getOpcode() != ISD::UNDEF))
6021 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6023 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6024 V1_HI->getOpcode() != ISD::UNDEF))
6025 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6028 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6031 /// Try to fold a build_vector that performs an 'addsub' into an X86ISD::ADDSUB node.
6033 static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
6034 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
6035 MVT VT = BV->getSimpleValueType(0);
6036 if ((!Subtarget->hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
6037 (!Subtarget->hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
6041 unsigned NumElts = VT.getVectorNumElements();
6042 SDValue InVec0 = DAG.getUNDEF(VT);
6043 SDValue InVec1 = DAG.getUNDEF(VT);
6045 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6046 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6048 // Odd-numbered elements in the input build vector are obtained from
6049 // adding two integer/float elements.
6050 // Even-numbered elements in the input build vector are obtained from
6051 // subtracting two integer/float elements.
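// As a worked illustration, a v4f32 build_vector of the form
//   (fsub A[0], B[0]), (fadd A[1], B[1]), (fsub A[2], B[2]), (fadd A[3], B[3])
// matches the ADDSUBPS semantics exactly: subtract in even lanes, add in odd
// lanes, with both operands of every element drawn from the same pair A and B.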
6052 unsigned ExpectedOpcode = ISD::FSUB;
6053 unsigned NextExpectedOpcode = ISD::FADD;
6054 bool AddFound = false;
6055 bool SubFound = false;
6057 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6058 SDValue Op = BV->getOperand(i);
6060 // Skip 'undef' values.
6061 unsigned Opcode = Op.getOpcode();
6062 if (Opcode == ISD::UNDEF) {
6063 std::swap(ExpectedOpcode, NextExpectedOpcode);
6067 // Early exit if we found an unexpected opcode.
6068 if (Opcode != ExpectedOpcode)
6071 SDValue Op0 = Op.getOperand(0);
6072 SDValue Op1 = Op.getOperand(1);
6074 // Try to match the following pattern:
6075 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6076 // Early exit if we cannot match that sequence.
6077 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6078 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6079 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6080 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6081 Op0.getOperand(1) != Op1.getOperand(1))
6084 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6088 // We found a valid add/sub node. Update the information accordingly.
6094 // Update InVec0 and InVec1.
6095 if (InVec0.getOpcode() == ISD::UNDEF) {
6096 InVec0 = Op0.getOperand(0);
6097 if (InVec0.getSimpleValueType() != VT)
6100 if (InVec1.getOpcode() == ISD::UNDEF) {
6101 InVec1 = Op1.getOperand(0);
6102 if (InVec1.getSimpleValueType() != VT)
6106 // Make sure that the operands of each add/sub node always
6107 // come from the same pair of vectors.
6108 if (InVec0 != Op0.getOperand(0)) {
6109 if (ExpectedOpcode == ISD::FSUB)
6112 // FADD is commutable. Try to commute the operands
6113 // and then test again.
6114 std::swap(Op0, Op1);
6115 if (InVec0 != Op0.getOperand(0))
6119 if (InVec1 != Op1.getOperand(0))
6122 // Update the pair of expected opcodes.
6123 std::swap(ExpectedOpcode, NextExpectedOpcode);
6126 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6127 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6128 InVec1.getOpcode() != ISD::UNDEF)
6129 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6134 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
6135 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
6136 const X86Subtarget *Subtarget,
6137 SelectionDAG &DAG) {
6138 MVT VT = BV->getSimpleValueType(0);
6139 unsigned NumElts = VT.getVectorNumElements();
6140 unsigned NumUndefsLO = 0;
6141 unsigned NumUndefsHI = 0;
6142 unsigned Half = NumElts/2;
6144 // Count the number of UNDEF operands in the build_vector in input.
6145 for (unsigned i = 0, e = Half; i != e; ++i)
6146 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6149 for (unsigned i = Half, e = NumElts; i != e; ++i)
6150 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6153 // Early exit if this is either a build_vector of all UNDEFs, or if all
6154 // the operands but one are UNDEF.
6155 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6159 SDValue InVec0, InVec1;
6160 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6161 // Try to match an SSE3 float HADD/HSUB.
6162 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6163 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6165 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6166 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6167 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6168 // Try to match an SSSE3 integer HADD/HSUB.
6169 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6170 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6172 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6173 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6176 if (!Subtarget->hasAVX())
6179 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6180 // Try to match an AVX horizontal add/sub of packed single/double
6181 // precision floating point values from 256-bit vectors.
6182 SDValue InVec2, InVec3;
6183 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6184 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6185 ((InVec0.getOpcode() == ISD::UNDEF ||
6186 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6187 ((InVec1.getOpcode() == ISD::UNDEF ||
6188 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6189 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6191 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6192 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6193 ((InVec0.getOpcode() == ISD::UNDEF ||
6194 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6195 ((InVec1.getOpcode() == ISD::UNDEF ||
6196 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6197 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6198 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6199 // Try to match an AVX2 horizontal add/sub of signed integers.
6200 SDValue InVec2, InVec3;
6201 unsigned X86Opcode;
6202 bool CanFold = true;
6204 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6205 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6206 ((InVec0.getOpcode() == ISD::UNDEF ||
6207 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6208 ((InVec1.getOpcode() == ISD::UNDEF ||
6209 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6210 X86Opcode = X86ISD::HADD;
6211 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6212 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6213 ((InVec0.getOpcode() == ISD::UNDEF ||
6214 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6215 ((InVec1.getOpcode() == ISD::UNDEF ||
6216 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6217 X86Opcode = X86ISD::HSUB;
6222 // Fold this build_vector into a single horizontal add/sub.
6223 // Do this only if the target has AVX2.
6224 if (Subtarget->hasAVX2())
6225 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6227 // Do not try to expand this build_vector into a pair of horizontal
6228 // add/sub if we can emit a pair of scalar add/sub.
6229 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6232 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6234 bool isUndefLO = NumUndefsLO == Half;
6235 bool isUndefHI = NumUndefsHI == Half;
6236 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6237 isUndefLO, isUndefHI);
6241 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6242 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6243 unsigned X86Opcode;
6244 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6245 X86Opcode = X86ISD::HADD;
6246 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6247 X86Opcode = X86ISD::HSUB;
6248 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6249 X86Opcode = X86ISD::FHADD;
6250 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6251 X86Opcode = X86ISD::FHSUB;
6255 // Don't try to expand this build_vector into a pair of horizontal add/sub
6256 // if we can simply emit a pair of scalar add/sub.
6257 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6260 // Convert this build_vector into two horizontal add/sub ops followed by a concat vector.
6262 bool isUndefLO = NumUndefsLO == Half;
6263 bool isUndefHI = NumUndefsHI == Half;
6264 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6265 isUndefLO, isUndefHI);
6271 SDValue
6272 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6273 SDLoc dl(Op);
6275 MVT VT = Op.getSimpleValueType();
6276 MVT ExtVT = VT.getVectorElementType();
6277 unsigned NumElems = Op.getNumOperands();
6279 // Lower build_vectors of i1 predicate vectors through a specialized path.
6280 if (VT.getVectorElementType() == MVT::i1 && Subtarget->hasAVX512())
6281 return LowerBUILD_VECTORvXi1(Op, DAG);
6283 // Vectors containing all zeros can be matched by pxor and xorps later
6284 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6285 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6286 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6287 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6290 return getZeroVector(VT, Subtarget, DAG, dl);
6293 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6294 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6295 // vpcmpeqd on 256-bit vectors.
6296 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6297 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6300 if (!VT.is512BitVector())
6301 return getOnesVector(VT, Subtarget, DAG, dl);
6304 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
6305 if (SDValue AddSub = LowerToAddSub(BV, Subtarget, DAG))
6307 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
6308 return HorizontalOp;
6309 if (SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG))
6312 unsigned EVTBits = ExtVT.getSizeInBits();
6314 unsigned NumZero = 0;
6315 unsigned NumNonZero = 0;
6316 uint64_t NonZeros = 0;
6317 bool IsAllConstants = true;
6318 SmallSet<SDValue, 8> Values;
6319 for (unsigned i = 0; i < NumElems; ++i) {
6320 SDValue Elt = Op.getOperand(i);
6321 if (Elt.getOpcode() == ISD::UNDEF)
6324 if (Elt.getOpcode() != ISD::Constant &&
6325 Elt.getOpcode() != ISD::ConstantFP)
6326 IsAllConstants = false;
6327 if (X86::isZeroNode(Elt))
6330 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
6331 NonZeros |= ((uint64_t)1 << i);
6336 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6337 if (NumNonZero == 0)
6338 return DAG.getUNDEF(VT);
6340 // Special case for single non-zero, non-undef, element.
6341 if (NumNonZero == 1) {
6342 unsigned Idx = countTrailingZeros(NonZeros);
6343 SDValue Item = Op.getOperand(Idx);
6345 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6346 // the value are obviously zero, truncate the value to i32 and do the
6347 // insertion that way. Only do this if the value is non-constant or if the
6348 // value is a constant being inserted into element 0. It is cheaper to do
6349 // a constant pool load than it is to do a movd + shuffle.
6350 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6351 (!IsAllConstants || Idx == 0)) {
6352 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6354 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6355 MVT VecVT = MVT::v4i32;
6357 // Truncate the value (which may itself be a constant) to i32, and
6358 // convert it to a vector with movd (S2V+shuffle to zero extend).
6359 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6360 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6361 return DAG.getBitcast(VT, getShuffleVectorZeroOrUndef(
6362 Item, Idx * 2, true, Subtarget, DAG));
6366 // If we have a constant or non-constant insertion into the low element of
6367 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6368 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6369 // depending on what the source datatype is.
6370 if (Idx == 0) {
6371 if (NumZero == 0)
6372 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6374 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6375 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6376 if (VT.is512BitVector()) {
6377 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6378 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6379 Item, DAG.getIntPtrConstant(0, dl));
6381 assert((VT.is128BitVector() || VT.is256BitVector()) &&
6382 "Expected an SSE value type!");
6383 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6384 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6385 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6388 // We can't directly insert an i8 or i16 into a vector, so zero extend it to i32 first.
6390 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
6391 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
6392 if (VT.is256BitVector()) {
6393 if (Subtarget->hasAVX()) {
6394 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v8i32, Item);
6395 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6397 // Without AVX, we need to extend to a 128-bit vector and then
6398 // insert into the 256-bit vector.
6399 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6400 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
6401 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
6404 assert(VT.is128BitVector() && "Expected an SSE value type!");
6405 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6406 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6408 return DAG.getBitcast(VT, Item);
6412 // Is it a vector logical left shift?
6413 if (NumElems == 2 && Idx == 1 &&
6414 X86::isZeroNode(Op.getOperand(0)) &&
6415 !X86::isZeroNode(Op.getOperand(1))) {
6416 unsigned NumBits = VT.getSizeInBits();
6417 return getVShift(true, VT,
6418 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
6419 VT, Op.getOperand(1)),
6420 NumBits/2, DAG, *this, dl);
6423 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
6426 // Otherwise, if this is a vector with i32 or f32 elements, and the element
6427 // is a non-constant being inserted into an element other than the low one,
6428 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
6429 // movd/movss) to move this into the low element, then shuffle it into place.
6431 if (EVTBits == 32) {
6432 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6433 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
6437 // Splat is obviously ok. Let legalizer expand it to a shuffle.
6438 if (Values.size() == 1) {
6439 if (EVTBits == 32) {
6440 // Instead of a shuffle like this:
6441 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
6442 // Check if it's possible to issue this instead.
6443 // shuffle (vload ptr), undef, <1, 1, 1, 1>
6444 unsigned Idx = countTrailingZeros(NonZeros);
6445 SDValue Item = Op.getOperand(Idx);
6446 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
6447 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
6452 // A vector full of immediates; various special cases are already
6453 // handled, so this is best done with a single constant-pool load.
6457 // For AVX-length vectors, see if we can use a vector load to get all of the
6458 // elements, otherwise build the individual 128-bit pieces and use
6459 // shuffles to put them in place.
6460 if (VT.is256BitVector() || VT.is512BitVector()) {
6461 SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
6463 // Check for a build vector of consecutive loads.
6464 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
6467 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
6469 // Build both the lower and upper subvector.
6470 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
6471 makeArrayRef(&V[0], NumElems/2));
6472 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
6473 makeArrayRef(&V[NumElems / 2], NumElems/2));
6475 // Recreate the wider vector with the lower and upper part.
6476 if (VT.is256BitVector())
6477 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
6478 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
6481 // Let legalizer expand 2-wide build_vectors.
6482 if (EVTBits == 64) {
6483 if (NumNonZero == 1) {
6484 // One half is zero or undef.
6485 unsigned Idx = countTrailingZeros(NonZeros);
6486 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
6487 Op.getOperand(Idx));
6488 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
6493 // If element VT is < 32 bits, convert it to inserts into a zero vector.
6494 if (EVTBits == 8 && NumElems == 16)
6495 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
6496 DAG, Subtarget, *this))
6499 if (EVTBits == 16 && NumElems == 8)
6500 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
6501 DAG, Subtarget, *this))
6504 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
6505 if (EVTBits == 32 && NumElems == 4)
6506 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this))
6509 // If element VT is == 32 bits, turn it into a number of shuffles.
6510 SmallVector<SDValue, 8> V(NumElems);
6511 if (NumElems == 4 && NumZero > 0) {
6512 for (unsigned i = 0; i < 4; ++i) {
6513 bool isZero = !(NonZeros & (1ULL << i));
6515 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
6517 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
6520 for (unsigned i = 0; i < 2; ++i) {
6521 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
6524 V[i] = V[i*2]; // Must be a zero vector.
6527 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
6530 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
6533 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
6538 bool Reverse1 = (NonZeros & 0x3) == 2;
6539 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
6543 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
6544 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
6546 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
6549 if (Values.size() > 1 && VT.is128BitVector()) {
6550 // Check for a build vector of consecutive loads.
6551 for (unsigned i = 0; i < NumElems; ++i)
6552 V[i] = Op.getOperand(i);
6554 // Check for elements which are consecutive loads.
6555 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
6558 // Check for a build vector formed mostly from a shuffle plus a few insertions.
6559 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
6562 // For SSE 4.1, use insertps to put the high elements into the low element.
6563 if (Subtarget->hasSSE41()) {
6565 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
6566 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
6568 Result = DAG.getUNDEF(VT);
6570 for (unsigned i = 1; i < NumElems; ++i) {
6571 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
6572 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
6573 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
6578 // Otherwise, expand into a number of unpckl*, start by extending each of
6579 // our (non-undef) elements to the full vector width with the element in the
6580 // bottom slot of the vector (which generates no code for SSE).
6581 for (unsigned i = 0; i < NumElems; ++i) {
6582 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
6583 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
6585 V[i] = DAG.getUNDEF(VT);
6588 // Next, we iteratively mix elements, e.g. for v4f32:
6589 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
6590 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
6591 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
6592 unsigned EltStride = NumElems >> 1;
6593 while (EltStride != 0) {
6594 for (unsigned i = 0; i < EltStride; ++i) {
6595 // If V[i+EltStride] is undef and this is the first round of mixing,
6596 // then it is safe to just drop this shuffle: V[i] is already in the
6597 // right place, the one element (since it's the first round) being
6598 // inserted as undef can be dropped. This isn't safe for successive
6599 // rounds because they will permute elements within both vectors.
6600 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
6601 EltStride == NumElems/2)
6604 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
6613 // 256-bit AVX can use the vinsertf128 instruction
6614 // to create 256-bit vectors from two other 128-bit ones.
6615 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6617 MVT ResVT = Op.getSimpleValueType();
6619 assert((ResVT.is256BitVector() ||
6620 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
6622 SDValue V1 = Op.getOperand(0);
6623 SDValue V2 = Op.getOperand(1);
6624 unsigned NumElems = ResVT.getVectorNumElements();
6625 if (ResVT.is256BitVector())
6626 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
6628 if (Op.getNumOperands() == 4) {
6629 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
6630 ResVT.getVectorNumElements()/2);
6631 SDValue V3 = Op.getOperand(2);
6632 SDValue V4 = Op.getOperand(3);
6633 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
6634 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
6636 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
6639 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
6640 const X86Subtarget *Subtarget,
6641 SelectionDAG & DAG) {
6643 MVT ResVT = Op.getSimpleValueType();
6644 unsigned NumOfOperands = Op.getNumOperands();
6646 assert(isPowerOf2_32(NumOfOperands) &&
6647 "Unexpected number of operands in CONCAT_VECTORS");
6649 SDValue Undef = DAG.getUNDEF(ResVT);
6650 if (NumOfOperands > 2) {
6651 // Specialize the cases when all, or all but one, of the operands are undef.
6652 unsigned NumOfDefinedOps = 0;
6653 unsigned OpIdx = 0;
6654 for (unsigned i = 0; i < NumOfOperands; i++)
6655 if (!Op.getOperand(i).isUndef()) {
6656 NumOfDefinedOps++;
6657 OpIdx = i;
6658 }
6659 if (NumOfDefinedOps == 0)
6660 return Undef;
6661 if (NumOfDefinedOps == 1) {
6662 unsigned SubVecNumElts =
6663 Op.getOperand(OpIdx).getValueType().getVectorNumElements();
6664 SDValue IdxVal = DAG.getIntPtrConstant(SubVecNumElts * OpIdx, dl);
6665 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef,
6666 Op.getOperand(OpIdx), IdxVal);
6669 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
6670 ResVT.getVectorNumElements()/2);
6671 SmallVector<SDValue, 2> Ops;
6672 for (unsigned i = 0; i < NumOfOperands/2; i++)
6673 Ops.push_back(Op.getOperand(i));
6674 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
6675 Ops.clear();
6676 for (unsigned i = NumOfOperands/2; i < NumOfOperands; i++)
6677 Ops.push_back(Op.getOperand(i));
6678 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
6679 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
6683 SDValue V1 = Op.getOperand(0);
6684 SDValue V2 = Op.getOperand(1);
6685 unsigned NumElems = ResVT.getVectorNumElements();
6686 assert(V1.getValueType() == V2.getValueType() &&
6687 V1.getValueType().getVectorNumElements() == NumElems/2 &&
6688 "Unexpected operands in CONCAT_VECTORS");
6690 if (ResVT.getSizeInBits() >= 16)
6691 return Op; // The operation is legal with KUNPCK
6693 bool IsZeroV1 = ISD::isBuildVectorAllZeros(V1.getNode());
6694 bool IsZeroV2 = ISD::isBuildVectorAllZeros(V2.getNode());
6695 SDValue ZeroVec = getZeroVector(ResVT, Subtarget, DAG, dl);
6696 if (IsZeroV1 && IsZeroV2)
6697 return ZeroVec;
6699 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6700 if (V2.isUndef())
6701 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
6702 if (IsZeroV2)
6703 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V1, ZeroIdx);
6705 SDValue IdxVal = DAG.getIntPtrConstant(NumElems/2, dl);
6706 if (V1.isUndef())
6707 V2 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V2, IdxVal);
6709 if (IsZeroV1)
6710 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V2, IdxVal);
6712 V1 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
6713 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, V1, V2, IdxVal);
6716 static SDValue LowerCONCAT_VECTORS(SDValue Op,
6717 const X86Subtarget *Subtarget,
6718 SelectionDAG &DAG) {
6719 MVT VT = Op.getSimpleValueType();
6720 if (VT.getVectorElementType() == MVT::i1)
6721 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
6723 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
6724 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
6725 Op.getNumOperands() == 4)));
6727 // AVX can use the vinsertf128 instruction to create 256-bit vectors
6728 // from two other 128-bit ones.
6730 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
6731 return LowerAVXCONCAT_VECTORS(Op, DAG);
6734 //===----------------------------------------------------------------------===//
6735 // Vector shuffle lowering
6737 // This is an experimental code path for lowering vector shuffles on x86. It is
6738 // designed to handle arbitrary vector shuffles and blends, gracefully
6739 // degrading performance as necessary. It works hard to recognize idiomatic
6740 // shuffles and lower them to optimal instruction patterns without leaving
6741 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
6743 //===----------------------------------------------------------------------===//
6745 /// \brief Tiny helper function to identify a no-op mask.
6747 /// This is a somewhat boring predicate function. It checks whether the mask
6748 /// array input, which is assumed to be a single-input shuffle mask of the kind
6749 /// used by the X86 shuffle instructions (not a fully general
6750 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
6751 /// in-place shuffle are 'no-op's.
6752 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
6753 for (int i = 0, Size = Mask.size(); i < Size; ++i)
6754 if (Mask[i] != -1 && Mask[i] != i)
6759 /// \brief Helper function to classify a mask as a single-input mask.
6761 /// This isn't a generic single-input test because in the vector shuffle
6762 /// lowering we canonicalize single inputs to be the first input operand. This
6763 /// means we can more quickly test for a single input by only checking whether
6764 /// an input from the second operand exists. We also assume that the size of
6765 /// mask corresponds to the size of the input vectors which isn't true in the
6766 /// fully general case.
6767 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
6768 for (int M : Mask)
6769 if (M >= (int)Mask.size())
6770 return false;
6771 return true;
6772 }
6774 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
6777 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
6778 /// and we routinely test for these.
6779 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
6780 int LaneSize = 128 / VT.getScalarSizeInBits();
6781 int Size = Mask.size();
6782 for (int i = 0; i < Size; ++i)
6783 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
6788 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
6790 /// This checks a shuffle mask to see if it is performing the same
6791 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
6792 /// that it is also not lane-crossing. It may however involve a blend from the
6793 /// same lane of a second vector.
6795 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
6796 /// non-trivial to compute in the face of undef lanes. The representation is
6797 /// *not* suitable for use with existing 128-bit shuffles as it will contain
6798 /// entries from both V1 and V2 inputs to the wider mask.
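///
/// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> is the 128-bit mask
/// <1, 0, 3, 2> repeated in both lanes, whereas <4, 5, 6, 7, 0, 1, 2, 3>
/// crosses lanes and has no repeated form.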
6799 static bool
6800 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
6801 SmallVectorImpl<int> &RepeatedMask) {
6802 int LaneSize = 128 / VT.getScalarSizeInBits();
6803 RepeatedMask.resize(LaneSize, -1);
6804 int Size = Mask.size();
6805 for (int i = 0; i < Size; ++i) {
6808 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
6809 // This entry crosses lanes, so there is no way to model this shuffle.
6812 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
6813 if (RepeatedMask[i % LaneSize] == -1)
6814 // This is the first non-undef entry in this slot of a 128-bit lane.
6815 RepeatedMask[i % LaneSize] =
6816 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
6817 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
6818 // Found a mismatch with the repeated mask.
6824 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
6827 /// This is a fast way to test a shuffle mask against a fixed pattern:
6829 /// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
6831 /// It returns true if the mask is exactly as wide as the argument list, and
6832 /// each element of the mask is either -1 (signifying undef) or the value given
6833 /// in the argument.
6834 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
6835 ArrayRef<int> ExpectedMask) {
6836 if (Mask.size() != ExpectedMask.size())
6839 int Size = Mask.size();
6841 // If the values are build vectors, we can look through them to find
6842 // equivalent inputs that make the shuffles equivalent.
6843 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
6844 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
6846 for (int i = 0; i < Size; ++i)
6847 if (Mask[i] != -1 && Mask[i] != ExpectedMask[i]) {
6848 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
6849 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
6850 if (!MaskBV || !ExpectedBV ||
6851 MaskBV->getOperand(Mask[i] % Size) !=
6852 ExpectedBV->getOperand(ExpectedMask[i] % Size))
6859 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
6861 /// This helper function produces an 8-bit shuffle immediate corresponding to
6862 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
6863 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for instance.
6866 /// NB: We rely heavily on "undef" masks preserving the input lane.
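///
/// For example, the mask <1, 0, 3, 2> produces the immediate
///   1 | (0 << 2) | (3 << 4) | (2 << 6) == 0xB1,
/// the familiar "swap adjacent element pairs" PSHUFD/SHUFPS encoding.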
6867 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
6868 SelectionDAG &DAG) {
6869 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
6870 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
6871 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
6872 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
6873 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
6876 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
6877 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
6878 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
6879 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
6880 return DAG.getConstant(Imm, DL, MVT::i8);
6883 /// \brief Compute whether each element of a shuffle is zeroable.
6885 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
6886 /// Either it is an undef element in the shuffle mask, the element of the input
6887 /// referenced is undef, or the element of the input referenced is known to be
6888 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
6889 /// as many lanes with this technique as possible to simplify the remaining shuffle.
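///
/// For example, for a v4i32 shuffle of (V1, zeroinitializer) with mask
/// <0, 5, -1, 7>, elements 1 and 3 read known-zero lanes of the second input
/// and element 2 is undef, so elements 1, 2 and 3 are all zeroable.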
6891 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
6892 SDValue V1, SDValue V2) {
6893 SmallBitVector Zeroable(Mask.size(), false);
6895 while (V1.getOpcode() == ISD::BITCAST)
6896 V1 = V1->getOperand(0);
6897 while (V2.getOpcode() == ISD::BITCAST)
6898 V2 = V2->getOperand(0);
6900 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
6901 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
6903 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6904 int M = Mask[i];
6905 // Handle the easy cases.
6906 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
6911 // If this is an index into a build_vector node (which has the same number
6912 // of elements), dig out the input value and use it.
6913 SDValue V = M < Size ? V1 : V2;
6914 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
6917 SDValue Input = V.getOperand(M % Size);
6918 // The UNDEF opcode check really should be dead code here, but not quite
6919 // worth asserting on (it isn't invalid, just unexpected).
6920 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
6927 // X86 has dedicated unpack instructions that can handle specific blend
6928 // operations: UNPCKH and UNPCKL.
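// For example, for v4i32 the UNPCKL blend pattern is <0, 4, 1, 5> and the
// UNPCKH pattern is <2, 6, 3, 7>; for wider types the same patterns repeat
// within each 128-bit lane.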
6929 static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT, ArrayRef<int> Mask,
6930 SDValue V1, SDValue V2,
6931 SelectionDAG &DAG) {
6932 int NumElts = VT.getVectorNumElements();
6933 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
6934 SmallVector<int, 8> Unpckl;
6935 SmallVector<int, 8> Unpckh;
6937 for (int i = 0; i < NumElts; ++i) {
6938 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
6939 int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
6940 int HiPos = LoPos + NumEltsInLane / 2;
6941 Unpckl.push_back(LoPos);
6942 Unpckh.push_back(HiPos);
6945 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
6946 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
6947 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
6948 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
6950 // Commute and try again.
6951 ShuffleVectorSDNode::commuteMask(Unpckl);
6952 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
6953 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
6955 ShuffleVectorSDNode::commuteMask(Unpckh);
6956 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
6957 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
6962 /// \brief Try to emit a bitmask instruction for a shuffle.
6964 /// This handles cases where we can model a blend exactly as a bitmask due to
6965 /// one of the inputs being zeroable.
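///
/// For example, shuffling (V1, zeroinitializer) with the v4i32 mask
/// <0, 5, 6, 3> reduces to an AND of V1 with <-1, 0, 0, -1>, since lanes 1
/// and 2 only need to be zeroed.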
6966 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
6967 SDValue V2, ArrayRef<int> Mask,
6968 SelectionDAG &DAG) {
6969 MVT EltVT = VT.getVectorElementType();
6970 int NumEltBits = EltVT.getSizeInBits();
6971 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
6972 SDValue Zero = DAG.getConstant(0, DL, IntEltVT);
6973 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
6975 if (EltVT.isFloatingPoint()) {
6976 Zero = DAG.getBitcast(EltVT, Zero);
6977 AllOnes = DAG.getBitcast(EltVT, AllOnes);
6979 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
6980 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
6982 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6985 if (Mask[i] % Size != i)
6986 return SDValue(); // Not a blend.
6988 V = Mask[i] < Size ? V1 : V2;
6989 else if (V != (Mask[i] < Size ? V1 : V2))
6990 return SDValue(); // Can only let one input through the mask.
6992 VMaskOps[i] = AllOnes;
6995 return SDValue(); // No non-zeroable elements!
6997 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
6998 V = DAG.getNode(VT.isFloatingPoint()
6999 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7004 /// \brief Try to emit a blend instruction for a shuffle using bit math.
7006 /// This is used as a fallback approach when first class blend instructions are
7007 /// unavailable. Currently it is only suitable for integer vectors, but could
7008 /// be generalized for floating point vectors if desirable.
7009 static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
7010 SDValue V2, ArrayRef<int> Mask,
7011 SelectionDAG &DAG) {
7012 assert(VT.isInteger() && "Only supports integer vector types!");
7013 MVT EltVT = VT.getVectorElementType();
7014 int NumEltBits = EltVT.getSizeInBits();
7015 SDValue Zero = DAG.getConstant(0, DL, EltVT);
7016 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
7018 SmallVector<SDValue, 16> MaskOps;
7019 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7020 if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
7021 return SDValue(); // Shuffled input!
7022 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
7025 SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
7026 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
7027 // We have to cast V2 around.
7028 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
7029 V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
7030 DAG.getBitcast(MaskVT, V1Mask),
7031 DAG.getBitcast(MaskVT, V2)));
7032 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
7035 /// \brief Try to emit a blend instruction for a shuffle.
7037 /// This doesn't do any checks for the availability of instructions for blending
7038 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7039 /// be matched in the backend with the type given. What it does check for is
7040 /// that the shuffle mask is a blend, or convertible into a blend with zero.
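///
/// For example, the v4f32 mask <0, 5, 2, 7> takes V2 in lanes 1 and 3, so
/// BlendMask becomes 0b1010 and the shuffle lowers to
/// (X86ISD::BLENDI V1, V2, 0xA).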
7041 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7042 SDValue V2, ArrayRef<int> Original,
7043 const X86Subtarget *Subtarget,
7044 SelectionDAG &DAG) {
7045 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7046 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7047 SmallVector<int, 8> Mask(Original.begin(), Original.end());
7048 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7049 bool ForceV1Zero = false, ForceV2Zero = false;
7051 // Attempt to generate the binary blend mask. If an input is zero then
7052 // we can use any lane.
7053 // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
7054 unsigned BlendMask = 0;
7055 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7061 if (M == i + Size) {
7062 BlendMask |= 1u << i;
7073 BlendMask |= 1u << i;
7078 return SDValue(); // Shuffled input!
7081 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
7083 V1 = getZeroVector(VT, Subtarget, DAG, DL);
7085 V2 = getZeroVector(VT, Subtarget, DAG, DL);
7087 auto ScaleBlendMask = [](unsigned BlendMask, int Size, int Scale) {
7088 unsigned ScaledMask = 0;
7089 for (int i = 0; i != Size; ++i)
7090 if (BlendMask & (1u << i))
7091 for (int j = 0; j != Scale; ++j)
7092 ScaledMask |= 1u << (i * Scale + j);
7093 return ScaledMask;
7094 };
7096 switch (VT.SimpleTy) {
7097 case MVT::v2f64:
7098 case MVT::v4f32:
7099 case MVT::v4f64:
7100 case MVT::v8f32:
7101 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7102 DAG.getConstant(BlendMask, DL, MVT::i8));
7104 case MVT::v4i64:
7105 case MVT::v8i32:
7106 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7107 // FALLTHROUGH
7108 case MVT::v2i64:
7109 case MVT::v4i32:
7110 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7111 // that instruction.
7112 if (Subtarget->hasAVX2()) {
7113 // Scale the blend by the number of 32-bit dwords per element.
7114 int Scale = VT.getScalarSizeInBits() / 32;
7115 BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
7116 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7117 V1 = DAG.getBitcast(BlendVT, V1);
7118 V2 = DAG.getBitcast(BlendVT, V2);
7119 return DAG.getBitcast(
7120 VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7121 DAG.getConstant(BlendMask, DL, MVT::i8)));
7125 // For integer shuffles we need to expand the mask and cast the inputs to
7126 // v8i16s prior to blending.
7127 int Scale = 8 / VT.getVectorNumElements();
7128 BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
7129 V1 = DAG.getBitcast(MVT::v8i16, V1);
7130 V2 = DAG.getBitcast(MVT::v8i16, V2);
7131 return DAG.getBitcast(VT,
7132 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7133 DAG.getConstant(BlendMask, DL, MVT::i8)));
7136 case MVT::v16i16: {
7137 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7138 SmallVector<int, 8> RepeatedMask;
7139 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7140 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7141 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7142 BlendMask = 0;
7143 for (int i = 0; i < 8; ++i)
7144 if (RepeatedMask[i] >= 16)
7145 BlendMask |= 1u << i;
7146 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7147 DAG.getConstant(BlendMask, DL, MVT::i8));
7153 assert((VT.is128BitVector() || Subtarget->hasAVX2()) &&
7154 "256-bit byte-blends require AVX2 support!");
7156 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
7157 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG))
7160 // Scale the blend by the number of bytes per element.
7161 int Scale = VT.getScalarSizeInBits() / 8;
7163 // This form of blend is always done on bytes. Compute the byte vector type.
7165 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7167 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7168 // mix of LLVM's code generator and the x86 backend. We tell the code
7169 // generator that boolean values in the elements of an x86 vector register
7170 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7171 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7172 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7173 // of the element (the remaining are ignored) and 0 in that high bit would
7174 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7175 // the LLVM model for boolean values in vector elements gets the relevant
7176 // bit set, it is set backwards and over-constrained relative to x86's actual model.
7178 SmallVector<SDValue, 32> VSELECTMask;
7179 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7180 for (int j = 0; j < Scale; ++j)
7181 VSELECTMask.push_back(
7182 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7183 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
7186 V1 = DAG.getBitcast(BlendVT, V1);
7187 V2 = DAG.getBitcast(BlendVT, V2);
7188 return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
7189 DAG.getNode(ISD::BUILD_VECTOR, DL,
7190 BlendVT, VSELECTMask),
7195 llvm_unreachable("Not a supported integer vector type!");
7199 /// \brief Try to lower as a blend of elements from two inputs followed by
7200 /// a single-input permutation.
7202 /// This matches the pattern where we can blend elements from two inputs and
7203 /// then reduce the shuffle to a single-input permutation.
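///
/// For example, the v4i32 mask <1, 4, 3, 6> first blends to
/// <V2[0], V1[1], V2[2], V1[3]> via the blend mask <4, 1, 6, 3>, and then
/// applies the single-input permutation <1, 0, 3, 2>.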
7204 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7207 SelectionDAG &DAG) {
7208 // We build up the blend mask while checking whether a blend is a viable way
7209 // to reduce the shuffle.
7210 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7211 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7213 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7214 if (Mask[i] < 0)
7215 continue;
7217 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7219 if (BlendMask[Mask[i] % Size] == -1)
7220 BlendMask[Mask[i] % Size] = Mask[i];
7221 else if (BlendMask[Mask[i] % Size] != Mask[i])
7222 return SDValue(); // Can't blend in the needed input!
7224 PermuteMask[i] = Mask[i] % Size;
7227 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7228 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7231 /// \brief Generic routine to decompose a shuffle and blend into independent
7232 /// blends and permutes.
7234 /// This matches the extremely common pattern for handling combined
7235 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7236 /// operations. It will try to pick the best arrangement of shuffles and
7238 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7242 SelectionDAG &DAG) {
7243 // Shuffle the input elements into the desired positions in V1 and V2 and
7244 // blend them together.
7245 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7246 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7247 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7248 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7249 if (Mask[i] >= 0 && Mask[i] < Size) {
7250 V1Mask[i] = Mask[i];
7252 } else if (Mask[i] >= Size) {
7253 V2Mask[i] = Mask[i] - Size;
7254 BlendMask[i] = i + Size;
7257 // Try to lower with the simpler initial blend strategy unless one of the
7258 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7259 // shuffle may be able to fold with a load or other benefit. However, when
7260 // we'll have to do 2x as many shuffles in order to achieve this, blending
7261 // first is a better strategy.
7262 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7263 if (SDValue BlendPerm =
7264 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7267 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7268 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7269 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7272 /// \brief Try to lower a vector shuffle as a byte rotation.
7274 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7275 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7276 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7277 /// try to generically lower a vector shuffle through such a pattern. It
7278 /// does not check for the profitability of lowering either as PALIGNR or
7279 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7280 /// This matches shuffle vectors that look like:
7282 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7284 /// Essentially it concatenates V1 and V2, shifts right by some number of
7285 /// elements, and takes the low elements as the result. Note that while this is
7286 /// specified as a *right shift* because x86 is little-endian, it is a *left
7287 /// rotate* of the vector lanes.
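///
/// For example, the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] above is matched
/// as a rotation by 3 elements, i.e. 6 bytes, so on SSSE3 targets it becomes
/// a PALIGNR with an immediate of 6.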
7288 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7291 const X86Subtarget *Subtarget,
7292 SelectionDAG &DAG) {
7293 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7295 int NumElts = Mask.size();
7296 int NumLanes = VT.getSizeInBits() / 128;
7297 int NumLaneElts = NumElts / NumLanes;
7299 // We need to detect various ways of spelling a rotation:
7300 // [11, 12, 13, 14, 15, 0, 1, 2]
7301 // [-1, 12, 13, 14, -1, -1, 1, -1]
7302 // [-1, -1, -1, -1, -1, -1, 1, 2]
7303 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7304 // [-1, 4, 5, 6, -1, -1, 9, -1]
7305 // [-1, 4, 5, 6, -1, -1, -1, -1]
7308 for (int l = 0; l < NumElts; l += NumLaneElts) {
7309 for (int i = 0; i < NumLaneElts; ++i) {
7310 if (Mask[l + i] == -1)
7312 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7314 // Get the mod-Size index and lane correct it.
7315 int LaneIdx = (Mask[l + i] % NumElts) - l;
7316 // Make sure it was in this lane.
7317 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7320 // Determine where a rotated vector would have started.
7321 int StartIdx = i - LaneIdx;
7322 if (StartIdx == 0)
7323 // The identity rotation isn't interesting, stop.
7324 return SDValue();
7326 // If we found the tail of a vector the rotation must be the missing
7327 // front. If we found the head of a vector, it must be how much of the head to save.
7329 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7332 Rotation = CandidateRotation;
7333 else if (Rotation != CandidateRotation)
7334 // The rotations don't match, so we can't match this mask.
7337 // Compute which value this mask is pointing at.
7338 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7340 // Compute which of the two target values this index should be assigned
7341 // to. This reflects whether the high elements are remaining or the low
7342 // elements are remaining.
7343 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7345 // Either set up this value if we've not encountered it before, or check
7346 // that it remains consistent.
7347 if (!TargetV)
7348 TargetV = MaskV;
7349 else if (TargetV != MaskV)
7350 // This may be a rotation, but it pulls from the inputs in some
7351 // unsupported interleaving.
7356 // Check that we successfully analyzed the mask, and normalize the results.
7357 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7358 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7364 // The actual rotate instruction rotates bytes, so we need to scale the
7365 // rotation based on how many bytes are in the vector lane.
7366 int Scale = 16 / NumLaneElts;
7368 // SSSE3 targets can use the palignr instruction.
7369 if (Subtarget->hasSSSE3()) {
7370 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7371 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7372 Lo = DAG.getBitcast(AlignVT, Lo);
7373 Hi = DAG.getBitcast(AlignVT, Hi);
7375 return DAG.getBitcast(
7376 VT, DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Lo, Hi,
7377 DAG.getConstant(Rotation * Scale, DL, MVT::i8)));
7380 assert(VT.is128BitVector() &&
7381 "Rotate-based lowering only supports 128-bit lowering!");
7382 assert(Mask.size() <= 16 &&
7383 "Can shuffle at most 16 bytes in a 128-bit vector!");
7385 // Default SSE2 implementation
7386 int LoByteShift = 16 - Rotation * Scale;
7387 int HiByteShift = Rotation * Scale;
7389 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7390 Lo = DAG.getBitcast(MVT::v2i64, Lo);
7391 Hi = DAG.getBitcast(MVT::v2i64, Hi);
7393 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7394 DAG.getConstant(LoByteShift, DL, MVT::i8));
7395 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7396 DAG.getConstant(HiByteShift, DL, MVT::i8));
7397 return DAG.getBitcast(VT,
7398 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7401 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7403 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
7404 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
7405 /// matches elements from one of the input vectors shuffled to the left or
7406 /// right with zeroable elements 'shifted in'. It handles both the strictly
7407 /// bit-wise element shifts and the byte shift across an entire 128-bit double
7410 /// PSHL : (little-endian) left bit shift.
7411 /// [ zz, 0, zz, 2 ]
7412 /// [ -1, 4, zz, -1 ]
7413 /// PSRL : (little-endian) right bit shift.
7415 /// [ -1, -1, 7, zz]
7416 /// PSLLDQ : (little-endian) left byte shift
7417 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
7418 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
7419 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
7420 /// PSRLDQ : (little-endian) right byte shift
7421 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
7422 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
7423 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
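///
/// For example, the v4i32 mask [ zz, 0, zz, 2 ] above matches with
/// Scale == 2 and Shift == 1: a v2i64 left shift by 32 bits, emitted as
/// X86ISD::VSHLI on the input bitcast to v2i64.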
7424 static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
7425 SDValue V2, ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7429 int Size = Mask.size();
7430 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7432 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
7433 for (int i = 0; i < Size; i += Scale)
7434 for (int j = 0; j < Shift; ++j)
7435 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
7441 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
7442 for (int i = 0; i != Size; i += Scale) {
7443 unsigned Pos = Left ? i + Shift : i;
7444 unsigned Low = Left ? i : i + Shift;
7445 unsigned Len = Scale - Shift;
7446 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
7447 Low + (V == V1 ? 0 : Size)))
7451 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
7452 bool ByteShift = ShiftEltBits > 64;
7453 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
7454 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
7455 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
7457 // Normalize the scale for byte shifts to still produce an i64 element type.
7459 Scale = ByteShift ? Scale / 2 : Scale;
7461 // We need to round trip through the appropriate type for the shift.
7462 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7463 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7464 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7465 "Illegal integer vector type");
7466 V = DAG.getBitcast(ShiftVT, V);
7468 V = DAG.getNode(OpCode, DL, ShiftVT, V,
7469 DAG.getConstant(ShiftAmt, DL, MVT::i8));
7470 return DAG.getBitcast(VT, V);
7473 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7474 // keep doubling the size of the integer elements up to that. We can
7475 // then shift the elements of the integer vector by whole multiples of
7476 // their width within the elements of the larger integer vector. Test each
7477 // multiple to see if we can find a match with the moved element indices
7478 // and that the shifted in elements are all zeroable.
7479 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
7480 for (int Shift = 1; Shift != Scale; ++Shift)
7481 for (bool Left : {true, false})
7482 if (CheckZeros(Shift, Scale, Left))
7483 for (SDValue V : {V1, V2})
7484 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
7491 /// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
7492 static SDValue lowerVectorShuffleWithSSE4A(SDLoc DL, MVT VT, SDValue V1,
7493 SDValue V2, ArrayRef<int> Mask,
7494 SelectionDAG &DAG) {
7495 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7496 assert(!Zeroable.all() && "Fully zeroable shuffle mask");
7498 int Size = Mask.size();
7499 int HalfSize = Size / 2;
7500 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7502 // Upper half must be undefined.
7503 if (!isUndefInRange(Mask, HalfSize, HalfSize))
7506 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
7507 // Remainder of lower half result is zero and upper half is all undef.
7508 auto LowerAsEXTRQ = [&]() {
7509 // Determine the extraction length from the part of the
7510 // lower half that isn't zeroable.
7512 for (; Len > 0; --Len)
7513 if (!Zeroable[Len - 1])
7515 assert(Len > 0 && "Zeroable shuffle mask");
7517 // Attempt to match first Len sequential elements from the lower half.
7520 for (int i = 0; i != Len; ++i) {
7524 SDValue &V = (M < Size ? V1 : V2);
7527 // The extracted elements must start at a valid index and all mask
7528 // elements must be in the lower half.
7529 if (i > M || M >= HalfSize)
7532 if (Idx < 0 || (Src == V && Idx == (M - i))) {
7543 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
7544 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
7545 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
7546 return DAG.getNode(X86ISD::EXTRQI, DL, VT, Src,
7547 DAG.getConstant(BitLen, DL, MVT::i8),
7548 DAG.getConstant(BitIdx, DL, MVT::i8));
7551 if (SDValue ExtrQ = LowerAsEXTRQ())
7554 // INSERTQ: Extract lowest Len elements from lower half of second source and
7555 // insert over first source, starting at Idx.
7556 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
7557 auto LowerAsInsertQ = [&]() {
7558 for (int Idx = 0; Idx != HalfSize; ++Idx) {
7561 // Attempt to match first source from mask before insertion point.
7562 if (isUndefInRange(Mask, 0, Idx)) {
7564 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
7566 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
7572 // Extend the extraction length looking to match both the insertion of
7573 // the second source and the remaining elements of the first.
7574 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
7579 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
7581 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
7587 // Match the remaining elements of the lower half.
7588 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
7590 } else if ((!Base || (Base == V1)) &&
7591 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
7593 } else if ((!Base || (Base == V2)) &&
7594 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
7601 // We may not have a base (first source) - this can safely be undefined.
7603 Base = DAG.getUNDEF(VT);
7605 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
7606 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
7607 return DAG.getNode(X86ISD::INSERTQI, DL, VT, Base, Insert,
7608 DAG.getConstant(BitLen, DL, MVT::i8),
7609 DAG.getConstant(BitIdx, DL, MVT::i8));
7616 if (SDValue InsertQ = LowerAsInsertQ())
7622 /// \brief Lower a vector shuffle as a zero or any extension.
7624 /// Given a specific number of elements, element bit width, and extension
7625 /// stride, produce either a zero or any extension based on the available
7626 /// features of the subtarget. The extended elements are consecutive and
7627 /// begin at an element index that can be offset into the input; to
7628 /// avoid excess shuffling, the offset must either be in the bottom lane
7629 /// or at the start of a higher lane. All extended elements must be from
7630 /// the same lane.
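///
/// For example, a v8i16 shuffle of (V1, zeroinitializer) with the mask
/// <0, 8, 1, 8, 2, 8, 3, 8> is a zero extension with Scale == 2, and on
/// SSE4.1 it can be emitted as PMOVZXWD (X86ISD::VZEXT v8i16 -> v4i32).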
7631 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7632 SDLoc DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
7633 ArrayRef<int> Mask, const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7634 assert(Scale > 1 && "Need a scale to extend.");
7635 int EltBits = VT.getScalarSizeInBits();
7636 int NumElements = VT.getVectorNumElements();
7637 int NumEltsPerLane = 128 / EltBits;
7638 int OffsetLane = Offset / NumEltsPerLane;
7639 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7640 "Only 8, 16, and 32 bit elements can be extended.");
7641 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7642 assert(0 <= Offset && "Extension offset must be non-negative.");
7643 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
7644 "Extension offset must be in the first lane or start an upper lane.");
7646 // Check that an index is in the same lane as the base offset.
7647 auto SafeOffset = [&](int Idx) {
7648 return OffsetLane == (Idx / NumEltsPerLane);
7651 // Shift along an input so that the offset base moves to the first element.
7652 auto ShuffleOffset = [&](SDValue V) {
7653 if (!Offset)
7654 return V;
7656 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
7657 for (int i = 0; i * Scale < NumElements; ++i) {
7658 int SrcIdx = i + Offset;
7659 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
7660 }
7661 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
7662 };
7664 // Found a valid zext mask! Try various lowering strategies based on the
7665 // input type and available ISA extensions.
7666 if (Subtarget->hasSSE41()) {
7667 // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
7668 // PUNPCK will catch this in a later shuffle match.
7669 if (Offset && Scale == 2 && VT.is128BitVector())
7670 return SDValue();
7671 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7672 NumElements / Scale);
7673 InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, ShuffleOffset(InputV));
7674 return DAG.getBitcast(VT, InputV);
7677 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
7679 // For any extends we can cheat for larger element sizes and use shuffle
7680 // instructions that can fold with a load and/or copy.
7681 if (AnyExt && EltBits == 32) {
7682 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
7683 -1};
7684 return DAG.getBitcast(
7685 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7686 DAG.getBitcast(MVT::v4i32, InputV),
7687 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
7688 }
7689 if (AnyExt && EltBits == 16 && Scale > 2) {
7690 int PSHUFDMask[4] = {Offset / 2, -1,
7691 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
7692 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7693 DAG.getBitcast(MVT::v4i32, InputV),
7694 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
7695 int PSHUFWMask[4] = {1, -1, -1, -1};
7696 unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
7697 return DAG.getBitcast(
7698 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
7699 DAG.getBitcast(MVT::v8i16, InputV),
7700 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
7701 }
7703 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
7704 // to 64 bits.
7705 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget->hasSSE4A()) {
7706 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
7707 assert(VT.is128BitVector() && "Unexpected vector width!");
7709 int LoIdx = Offset * EltBits;
7710 SDValue Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
7711 DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
7712 DAG.getConstant(EltBits, DL, MVT::i8),
7713 DAG.getConstant(LoIdx, DL, MVT::i8)));
7715 if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
7716 !SafeOffset(Offset + 1))
7717 return DAG.getNode(ISD::BITCAST, DL, VT, Lo);
7719 int HiIdx = (Offset + 1) * EltBits;
7720 SDValue Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
7721 DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
7722 DAG.getConstant(EltBits, DL, MVT::i8),
7723 DAG.getConstant(HiIdx, DL, MVT::i8)));
7724 return DAG.getNode(ISD::BITCAST, DL, VT,
7725 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
7726 }
7728 // If this would require more than 2 unpack instructions to expand, use
7729 // pshufb when available. We can only use more than 2 unpack instructions
7730 // when zero extending i8 elements, which also makes it easier to use pshufb.
7731 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7732 assert(NumElements == 16 && "Unexpected byte vector width!");
7733 SDValue PSHUFBMask[16];
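// For example (illustrative): Scale == 4 with Offset == 0 builds the
// byte-select mask {0, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80, ...};
// PSHUFB zeroes any lane whose selector has its high bit set.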
7734 for (int i = 0; i < 16; ++i) {
7735 int Idx = Offset + (i / Scale);
7736 PSHUFBMask[i] = DAG.getConstant(
7737 (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
7739 InputV = DAG.getBitcast(MVT::v16i8, InputV);
7740 return DAG.getBitcast(VT,
7741 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7742 DAG.getNode(ISD::BUILD_VECTOR, DL,
7743 MVT::v16i8, PSHUFBMask)));
7744 }
7746 // If we are extending from an offset, ensure we start on a boundary that
7747 // we can unpack from.
7748 int AlignToUnpack = Offset % (NumElements / Scale);
7749 if (AlignToUnpack) {
7750 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
7751 for (int i = AlignToUnpack; i < NumElements; ++i)
7752 ShMask[i - AlignToUnpack] = i;
7753 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
7754 Offset -= AlignToUnpack;
7755 }
7757 // Otherwise emit a sequence of unpacks.
7758 do {
7759 unsigned UnpackLoHi = X86ISD::UNPCKL;
7760 if (Offset >= (NumElements / 2)) {
7761 UnpackLoHi = X86ISD::UNPCKH;
7762 Offset -= (NumElements / 2);
7763 }
7765 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7766 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7767 : getZeroVector(InputVT, Subtarget, DAG, DL);
7768 InputV = DAG.getBitcast(InputVT, InputV);
7769 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
7770 Scale /= 2;
7771 EltBits *= 2;
7772 NumElements /= 2;
7773 } while (Scale > 1);
7774 return DAG.getBitcast(VT, InputV);
7777 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7779 /// This routine will try to do everything in its power to cleverly lower
7780 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7781 /// check for the profitability of this lowering, it tries to aggressively
7782 /// match this pattern. It will use all of the micro-architectural details it
7783 /// can to emit an efficient lowering. It handles both blends with all-zero
7784 /// inputs (to explicitly zero-extend) and undef lanes (sometimes undef due
7785 /// to masking out later).
7787 /// The reason we have dedicated lowering for zext-style shuffles is that they
7788 /// are both incredibly common and often quite performance sensitive.
7789 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7790 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7791 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7792 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7794 int Bits = VT.getSizeInBits();
7795 int NumLanes = Bits / 128;
7796 int NumElements = VT.getVectorNumElements();
7797 int NumEltsPerLane = NumElements / NumLanes;
7798 assert(VT.getScalarSizeInBits() <= 32 &&
7799 "Exceeds 32-bit integer zero extension limit");
7800 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
7802 // Define a helper function to check a particular ext-scale and lower to it
7803 // if valid.
7804 auto Lower = [&](int Scale) -> SDValue {
7805 SDValue InputV;
7806 bool AnyExt = true;
7807 int Offset = 0;
7808 int Matches = 0;
7809 for (int i = 0; i < NumElements; ++i) {
7810 int M = Mask[i];
7811 if (M == -1)
7812 continue; // Valid anywhere but doesn't tell us anything.
7813 if (i % Scale != 0) {
7814 // Each of the extended elements needs to be zeroable.
7815 if (!Zeroable[i])
7816 return SDValue();
7818 // We are no longer in the anyext case.
7819 AnyExt = false;
7820 continue;
7821 }
7823 // Each of the base elements needs to be consecutive indices into the
7824 // same input vector.
7825 SDValue V = M < NumElements ? V1 : V2;
7826 M = M % NumElements;
7827 if (!InputV) {
7828 InputV = V;
7829 Offset = M - (i / Scale);
7830 } else if (InputV != V)
7831 return SDValue(); // Flip-flopping inputs.
7833 // Offset must start in the lowest 128-bit lane or at the start of an
7834 // upper lane.
7835 // FIXME: Is it ever worth allowing a negative base offset?
7836 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
7837 (Offset % NumEltsPerLane) == 0))
7838 return SDValue();
7840 // If we are offsetting, all referenced entries must come from the same
7841 // 128-bit lane.
7842 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
7843 return SDValue();
7845 if ((M % NumElements) != (Offset + (i / Scale)))
7846 return SDValue(); // Non-consecutive strided elements.
7848 Matches++;
7849 }
7850 // If we fail to find an input, we have a zero-shuffle which should always
7851 // have already been handled.
7852 // FIXME: Maybe handle this here in case during blending we end up with one?
7853 if (!InputV)
7854 return SDValue();
7856 // If we are offsetting, don't extend if we only match a single input; we
7857 // can always do better by using a basic PSHUF or PUNPCK.
7858 if (Offset != 0 && Matches < 2)
7859 return SDValue();
7861 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7862 DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
7863 };
7865 // The widest scale possible for extending is to a 64-bit integer.
7866 assert(Bits % 64 == 0 &&
7867 "The number of bits in a vector must be divisible by 64 on x86!");
7868 int NumExtElements = Bits / 64;
7870 // Each iteration, try extending the elements half as much, but into twice as
7871 // many elements.
7872 for (; NumExtElements < NumElements; NumExtElements *= 2) {
7873 assert(NumElements % NumExtElements == 0 &&
7874 "The input vector size must be divisible by the extended size.");
7875 if (SDValue V = Lower(NumElements / NumExtElements))
7876 return V;
7877 }
7879 // General extends failed, but 128-bit vectors may be able to use MOVQ.
7883 // Returns one of the source operands if the shuffle can be reduced to a
7884 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
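// For example (illustrative): a v4i32 mask {0, 1, Z, Z} (Z = zeroable)
// reduces to a MOVQ of V1, and {4, 5, Z, Z} to a MOVQ of V2.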
7885 auto CanZExtLowHalf = [&]() {
7886 for (int i = NumElements / 2; i != NumElements; ++i)
7887 if (!Zeroable[i])
7888 return SDValue();
7889 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
7890 return V1;
7891 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
7892 return V2;
7893 return SDValue();
7894 };
7896 if (SDValue V = CanZExtLowHalf()) {
7897 V = DAG.getBitcast(MVT::v2i64, V);
7898 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
7899 return DAG.getBitcast(VT, V);
7900 }
7902 // No viable ext lowering found.
7903 return SDValue();
7904 }
7906 /// \brief Try to get a scalar value for a specific element of a vector.
7908 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
7909 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
7910 SelectionDAG &DAG) {
7911 MVT VT = V.getSimpleValueType();
7912 MVT EltVT = VT.getVectorElementType();
7913 while (V.getOpcode() == ISD::BITCAST)
7914 V = V.getOperand(0);
7915 // If the bitcasts shift the element size, we can't extract an equivalent
7916 // element from it.
7917 MVT NewVT = V.getSimpleValueType();
7918 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
7919 return SDValue();
7921 if (V.getOpcode() == ISD::BUILD_VECTOR ||
7922 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
7923 // Ensure the scalar operand is the same size as the destination.
7924 // FIXME: Add support for scalar truncation where possible.
7925 SDValue S = V.getOperand(Idx);
7926 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
7927 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, S);
7928 }
7930 return SDValue();
7931 }
7933 /// \brief Helper to test for a load that can be folded with x86 shuffles.
7935 /// This is particularly important because the set of instructions varies
7936 /// significantly based on whether the operand is a load or not.
7937 static bool isShuffleFoldableLoad(SDValue V) {
7938 while (V.getOpcode() == ISD::BITCAST)
7939 V = V.getOperand(0);
7941 return ISD::isNON_EXTLoad(V.getNode());
7944 /// \brief Try to lower insertion of a single element into a zero vector.
7946 /// This is a common pattern for which we have especially efficient lowerings
7947 /// across all subtarget feature sets.
7948 static SDValue lowerVectorShuffleAsElementInsertion(
7949 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7950 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7951 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7953 MVT EltVT = VT.getVectorElementType();
7954 MVT ExtVT = VT;
7955 int V2Index = std::find_if(Mask.begin(), Mask.end(),
7956 [&Mask](int M) { return M >= (int)Mask.size(); }) -
7957 Mask.begin();
7958 bool IsV1Zeroable = true;
7959 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7960 if (i != V2Index && !Zeroable[i]) {
7961 IsV1Zeroable = false;
7962 break;
7963 }
7965 // Check for a single input from a SCALAR_TO_VECTOR node.
7966 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
7967 // all the smarts here sunk into that routine. However, the current
7968 // lowering of BUILD_VECTOR makes that nearly impossible until the old
7969 // vector shuffle lowering is dead.
7970 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
7971 DAG);
7972 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
7973 // We need to zext the scalar if it is smaller than an i32.
7974 V2S = DAG.getBitcast(EltVT, V2S);
7975 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
7976 // Using zext to expand a narrow element won't work for non-zero
7977 // insertions.
7978 if (!IsV1Zeroable)
7979 return SDValue();
7981 // Zero-extend directly to i32.
7982 ExtVT = MVT::v4i32;
7983 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
7984 }
7985 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
7986 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
7987 EltVT == MVT::i16) {
7988 // Either not inserting from the low element of the input or the input
7989 // element size is too small to use VZEXT_MOVL to clear the high bits.
7990 return SDValue();
7991 }
7993 if (!IsV1Zeroable) {
7994 // If V1 can't be treated as a zero vector we have fewer options to lower
7995 // this. We can't support integer vectors or non-zero targets cheaply, and
7996 // the V1 elements can't be permuted in any way.
7997 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
7998 if (!VT.isFloatingPoint() || V2Index != 0)
7999 return SDValue();
8000 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8001 V1Mask[V2Index] = -1;
8002 if (!isNoopShuffleMask(V1Mask))
8003 return SDValue();
8004 // This is essentially a special case blend operation, but if we have
8005 // general purpose blend operations, they are always faster. Bail and let
8006 // the rest of the lowering handle these as blends.
8007 if (Subtarget->hasSSE41())
8008 return SDValue();
8010 // Otherwise, use MOVSD or MOVSS.
8011 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8012 "Only two types of floating point element types to handle!");
8013 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8014 VT, V1, V2);
8015 }
8017 // This lowering only works for the low element with floating point vectors.
8018 if (VT.isFloatingPoint() && V2Index != 0)
8019 return SDValue();
8021 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8022 if (ExtVT != VT)
8023 V2 = DAG.getBitcast(VT, V2);
8026 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8027 // the desired position. Otherwise it is more efficient to do a vector
8028 // shift left. We know that we can do a vector shift left because all
8029 // the inputs are zero.
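// For example (illustrative): v8i16 with V2Index == 5 becomes a VSHLDQ
// (PSLLDQ) left shift by 5 * 16 / 8 == 10 bytes.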
8030 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8031 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8032 V2Shuffle[V2Index] = 0;
8033 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8035 V2 = DAG.getBitcast(MVT::v2i64, V2);
8036 V2 = DAG.getNode(
8037 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8038 DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL,
8039 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(
8040 DAG.getDataLayout(), VT)));
8041 V2 = DAG.getBitcast(VT, V2);
8042 }
8044 return V2;
8045 }
8047 /// \brief Try to lower a broadcast of a single (truncated) integer element,
8048 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
8050 /// This assumes we have AVX2.
8051 static SDValue lowerVectorShuffleAsTruncBroadcast(SDLoc DL, MVT VT, SDValue V0,
8053 const X86Subtarget *Subtarget,
8054 SelectionDAG &DAG) {
8055 assert(Subtarget->hasAVX2() &&
8056 "We can only lower integer broadcasts with AVX2!");
8058 EVT EltVT = VT.getVectorElementType();
8059 EVT V0VT = V0.getValueType();
8061 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
8062 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
8064 EVT V0EltVT = V0VT.getVectorElementType();
8065 if (!V0EltVT.isInteger())
8068 const unsigned EltSize = EltVT.getSizeInBits();
8069 const unsigned V0EltSize = V0EltVT.getSizeInBits();
8071 // This is only a truncation if the original element type is larger.
8072 if (V0EltSize <= EltSize)
8075 assert(((V0EltSize % EltSize) == 0) &&
8076 "Scalar type sizes must all be powers of 2 on x86!");
8078 const unsigned V0Opc = V0.getOpcode();
8079 const unsigned Scale = V0EltSize / EltSize;
8080 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
8082 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
8083 V0Opc != ISD::BUILD_VECTOR)
8086 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
8088 // If we're extracting non-least-significant bits, shift so we can truncate.
8089 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
8090 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
8091 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
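// For example (illustrative): broadcasting i16 element 3 out of a
// build_vector of i64 scalars gives Scale == 4, V0BroadcastIdx == 0 and
// OffsetIdx == 3, so the scalar is shifted right by 3 * 16 == 48 bits
// before the truncate.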
8092 if (const int OffsetIdx = BroadcastIdx % Scale)
8093 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
8094 DAG.getConstant(OffsetIdx * EltSize, DL, Scalar.getValueType()));
8096 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
8097 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
8100 /// \brief Try to lower broadcast of a single element.
8102 /// For convenience, this code also bundles all of the subtarget feature set
8103 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8104 /// a convenient way to factor it out.
8105 /// FIXME: This is very similar to LowerVectorBroadcast - can we merge them?
8106 static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
8108 const X86Subtarget *Subtarget,
8109 SelectionDAG &DAG) {
8110 if (!Subtarget->hasAVX())
8112 if (VT.isInteger() && !Subtarget->hasAVX2())
8115 // Check that the mask is a broadcast.
8116 int BroadcastIdx = -1;
8117 for (int M : Mask)
8118 if (M >= 0 && BroadcastIdx == -1)
8119 BroadcastIdx = M;
8120 else if (M >= 0 && M != BroadcastIdx)
8121 return SDValue();
8123 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8124 "a sorted mask where the broadcast "
8125 "entry occurs first.");
8127 // Go up the chain of (vector) values to find a scalar load that we can
8128 // combine with the broadcast.
8130 switch (V.getOpcode()) {
8131 case ISD::CONCAT_VECTORS: {
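// For example: broadcasting element 5 of a concat of two v4f32 vectors
// gives OperandSize == 4, so we descend into operand 1 with the index
// rebased to 1.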
8132 int OperandSize = Mask.size() / V.getNumOperands();
8133 V = V.getOperand(BroadcastIdx / OperandSize);
8134 BroadcastIdx %= OperandSize;
8138 case ISD::INSERT_SUBVECTOR: {
8139 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8140 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8144 int BeginIdx = (int)ConstantIdx->getZExtValue();
8146 BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
8147 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8148 BroadcastIdx -= BeginIdx;
8159 // Check if this is a broadcast of a scalar. We special case lowering
8160 // for scalars so that we can more effectively fold with loads.
8161 // First, look through bitcast: if the original value has a larger element
8162 // type than the shuffle, the broadcast element is in essence truncated.
8163 // Make that explicit to ease folding.
8164 if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
8165 if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
8166 DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
8167 return TruncBroadcast;
8169 // Also check the simpler case, where we can directly reuse the scalar.
8170 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8171 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8172 V = V.getOperand(BroadcastIdx);
8174 // If the scalar isn't a load, we can't broadcast from it in AVX1.
8175 // Only AVX2 has register broadcasts.
8176 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8177 return SDValue();
8178 } else if (MayFoldLoad(V) && !cast<LoadSDNode>(V)->isVolatile()) {
8179 // If we are broadcasting a load that is only used by the shuffle
8180 // then we can reduce the vector load to the broadcasted scalar load.
8181 LoadSDNode *Ld = cast<LoadSDNode>(V);
8182 SDValue BaseAddr = Ld->getOperand(1);
8183 EVT AddrVT = BaseAddr.getValueType();
8184 EVT SVT = VT.getScalarType();
8185 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
8186 SDValue NewAddr = DAG.getNode(
8187 ISD::ADD, DL, AddrVT, BaseAddr,
8188 DAG.getConstant(Offset, DL, AddrVT));
8189 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
8190 DAG.getMachineFunction().getMachineMemOperand(
8191 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
8192 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8193 // We can't broadcast from a vector register without AVX2, and we can only
8194 // broadcast from the zero-element of a vector register.
8198 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8201 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8202 // INSERTPS when the V1 elements are already in the correct locations
8203 // because otherwise we can just always use two SHUFPS instructions which
8204 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8205 // perform INSERTPS if a single V1 element is out of place and all V2
8206 // elements are zeroable.
8207 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8209 SelectionDAG &DAG) {
8210 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8211 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8212 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8213 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8215 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8217 unsigned ZMask = 0;
8218 int V1DstIndex = -1;
8219 int V2DstIndex = -1;
8220 bool V1UsedInPlace = false;
8222 for (int i = 0; i < 4; ++i) {
8223 // Synthesize a zero mask from the zeroable elements (includes undefs).
8224 if (Zeroable[i]) {
8225 ZMask |= 1 << i;
8226 continue;
8227 }
8229 // Flag if we use any V1 inputs in place.
8230 if (i == Mask[i]) {
8231 V1UsedInPlace = true;
8232 continue;
8233 }
8235 // We can only insert a single non-zeroable element.
8236 if (V1DstIndex != -1 || V2DstIndex != -1)
8237 return SDValue();
8239 if (Mask[i] < 4) {
8240 // V1 input out of place for insertion.
8241 V1DstIndex = i;
8242 } else {
8243 // V2 input for insertion.
8244 V2DstIndex = i;
8245 }
8246 }
8248 // Don't bother if we have no (non-zeroable) element for insertion.
8249 if (V1DstIndex == -1 && V2DstIndex == -1)
8250 return SDValue();
8252 // Determine element insertion src/dst indices. The src index is from the
8253 // start of the inserted vector, not the start of the concatenated vector.
8254 unsigned V2SrcIndex = 0;
8255 if (V1DstIndex != -1) {
8256 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8257 // and don't use the original V2 at all.
8258 V2SrcIndex = Mask[V1DstIndex];
8259 V2DstIndex = V1DstIndex;
8260 V2 = V1;
8261 } else {
8262 V2SrcIndex = Mask[V2DstIndex] - 4;
8263 }
8265 // If no V1 inputs are used in place, then the result is created only from
8266 // the zero mask and the V2 insertion - so remove V1 dependency.
8267 if (!V1UsedInPlace)
8268 V1 = DAG.getUNDEF(MVT::v4f32);
8270 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8271 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
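// The INSERTPS immediate is, from the top bits down: two bits of source
// element, two bits of destination slot, and a 4-bit zero mask. For example
// (illustrative), copying V2[1] into slot 2 while zeroing slot 0 encodes as
// 0x61.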
8273 // Insert the V2 element into the desired position.
8274 SDLoc DL(Op);
8275 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8276 DAG.getConstant(InsertPSMask, DL, MVT::i8));
8279 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8280 /// UNPCK instruction.
8282 /// This specifically targets cases where we end up with alternating between
8283 /// the two inputs, and so can permute them into something that feeds a single
8284 /// UNPCK instruction. Note that this routine only targets integer vectors
8285 /// because for floating point vectors we have a generalized SHUFPS lowering
8286 /// strategy that handles everything that doesn't *exactly* match an unpack,
8287 /// making this clever lowering unnecessary.
8288 static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
8289 SDValue V1, SDValue V2,
8291 SelectionDAG &DAG) {
8292 assert(!VT.isFloatingPoint() &&
8293 "This routine only supports integer vectors.");
8294 assert(!isSingleInputShuffleMask(Mask) &&
8295 "This routine should only be used when blending two inputs.");
8296 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8298 int Size = Mask.size();
8300 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
8301 return M >= 0 && M % Size < Size / 2;
8303 int NumHiInputs = std::count_if(
8304 Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
8306 bool UnpackLo = NumLoInputs >= NumHiInputs;
8308 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
8309 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8310 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8312 for (int i = 0; i < Size; ++i) {
8316 // Each element of the unpack contains Scale elements from this mask.
8317 int UnpackIdx = i / Scale;
8319 // We only handle the case where V1 feeds the first slots of the unpack.
8320 // We rely on canonicalization to ensure this is the case.
8321 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8324 // Setup the mask for this input. The indexing is tricky as we have to
8325 // handle the unpack stride.
8326 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8327 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
8331 // If we will have to shuffle both inputs to use the unpack, check whether
8332 // we can just unpack first and shuffle the result. If so, skip this unpack.
8333 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
8334 !isNoopShuffleMask(V2Mask))
8337 // Shuffle the inputs into place.
8338 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8339 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8341 // Cast the inputs to the type we will use to unpack them.
8342 V1 = DAG.getBitcast(UnpackVT, V1);
8343 V2 = DAG.getBitcast(UnpackVT, V2);
8345 // Unpack the inputs and cast the result back to the desired type.
8346 return DAG.getBitcast(
8347 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
8351 // We try each unpack from the largest to the smallest to try and find one
8352 // that fits this mask.
8353 int OrigNumElements = VT.getVectorNumElements();
8354 int OrigScalarSize = VT.getScalarSizeInBits();
8355 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
8356 int Scale = ScalarSize / OrigScalarSize;
8357 int NumElements = OrigNumElements / Scale;
8358 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
8359 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
8363 // If none of the unpack-rooted lowerings worked (or were profitable) try an
8364 // initial unpack.
8365 if (NumLoInputs == 0 || NumHiInputs == 0) {
8366 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
8367 "We have to have *some* inputs!");
8368 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
8370 // FIXME: We could consider the total complexity of the permute of each
8371 // possible unpacking. Or at the least we should consider how many
8372 // half-crossings are created.
8373 // FIXME: We could consider commuting the unpacks.
8375 SmallVector<int, 32> PermMask;
8376 PermMask.assign(Size, -1);
8377 for (int i = 0; i < Size; ++i) {
8378 if (Mask[i] < 0)
8379 continue;
8381 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
8383 PermMask[i] =
8384 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
8386 return DAG.getVectorShuffle(
8387 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
8388 DL, VT, V1, V2),
8389 DAG.getUNDEF(VT), PermMask);
8395 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8397 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8398 /// support for floating point shuffles but not integer shuffles. These
8399 /// instructions will incur a domain crossing penalty on some chips though so
8400 /// it is better to avoid lowering through this for integer vectors where
8401 /// possible.
8402 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8403 const X86Subtarget *Subtarget,
8404 SelectionDAG &DAG) {
8405 SDLoc DL(Op);
8406 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8407 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8408 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8409 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8410 ArrayRef<int> Mask = SVOp->getMask();
8411 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8413 if (isSingleInputShuffleMask(Mask)) {
8414 // Use low duplicate instructions for masks that match their pattern.
8415 if (Subtarget->hasSSE3())
8416 if (isShuffleEquivalent(V1, V2, Mask, {0, 0}))
8417 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8419 // Straight shuffle of a single input vector. Simulate this by using the
8420 // single input as both of the "inputs" to this instruction.
8421 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
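// The low two SHUFPD immediate bits select element 0 or 1 from each input;
// e.g. the broadcast mask {1, 1} encodes as 0x3.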
8423 if (Subtarget->hasAVX()) {
8424 // If we have AVX, we can use VPERMILPS which will allow folding a load
8425 // into the shuffle.
8426 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8427 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8430 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V1,
8431 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8433 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8434 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8436 // If we have a single input, insert that into V1 if we can do so cheaply.
8437 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8438 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8439 DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
8440 return Insertion;
8441 // Try inverting the insertion since for v2 masks it is easy to do and we
8442 // can't reliably sort the mask one way or the other.
8443 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8444 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8445 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8446 DL, MVT::v2f64, V2, V1, InverseMask, Subtarget, DAG))
8447 return Insertion;
8448 }
8450 // Try to use one of the special instruction patterns to handle two common
8451 // blend patterns if a zero-blend above didn't work.
8452 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
8453 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
8454 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8455 // We can either use a special instruction to load over the low double or
8456 // to move just the low double.
8457 return DAG.getNode(
8458 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8459 DL, MVT::v2f64, V2,
8460 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8462 if (Subtarget->hasSSE41())
8463 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8464 Subtarget, DAG))
8465 return Blend;
8467 // Use dedicated unpack instructions for masks that match their pattern.
8468 if (SDValue V =
8469 lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
8470 return V;
8472 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8473 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
8474 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8477 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8479 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8480 /// the integer unit to minimize domain crossing penalties. However, for blends
8481 /// it falls back to the floating point shuffle operation with appropriate bit
8482 /// casting.
8483 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8484 const X86Subtarget *Subtarget,
8485 SelectionDAG &DAG) {
8486 SDLoc DL(Op);
8487 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8488 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8489 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8490 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8491 ArrayRef<int> Mask = SVOp->getMask();
8492 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8494 if (isSingleInputShuffleMask(Mask)) {
8495 // Check for being able to broadcast a single element.
8496 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v2i64, V1,
8497 Mask, Subtarget, DAG))
8498 return Broadcast;
8500 // Straight shuffle of a single input vector. For everything from SSE2
8501 // onward this has a single fast instruction with no scary immediates.
8502 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8503 V1 = DAG.getBitcast(MVT::v4i32, V1);
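// For example: the v2i64 mask {1, 0} widens to the v4i32 PSHUFD mask
// {2, 3, 0, 1}.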
8504 int WidenedMask[4] = {
8505 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8506 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8507 return DAG.getBitcast(
8509 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8510 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
8512 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
8513 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
8514 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
8515 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
8517 // If we have a blend of two PACKUS operations and the blend aligns with the
8518 // low and high halves, we can just merge the PACKUS operations. This is
8519 // particularly important as it lets us merge shuffles that this routine itself
8520 // creates.
8521 auto GetPackNode = [](SDValue V) {
8522 while (V.getOpcode() == ISD::BITCAST)
8523 V = V.getOperand(0);
8525 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
8527 if (SDValue V1Pack = GetPackNode(V1))
8528 if (SDValue V2Pack = GetPackNode(V2))
8529 return DAG.getBitcast(MVT::v2i64,
8530 DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
8531 Mask[0] == 0 ? V1Pack.getOperand(0)
8532 : V1Pack.getOperand(1),
8533 Mask[1] == 2 ? V2Pack.getOperand(0)
8534 : V2Pack.getOperand(1)));
8536 // Try to use shift instructions.
8537 if (SDValue Shift =
8538 lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
8539 return Shift;
8541 // When loading a scalar and then shuffling it into a vector we can often do
8542 // the insertion cheaply.
8543 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8544 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8545 return Insertion;
8546 // Try inverting the insertion since for v2 masks it is easy to do and we
8547 // can't reliably sort the mask one way or the other.
8548 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
8549 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8550 DL, MVT::v2i64, V2, V1, InverseMask, Subtarget, DAG))
8551 return Insertion;
8553 // We have different paths for blend lowering, but they all must use the
8554 // *exact* same predicate.
8555 bool IsBlendSupported = Subtarget->hasSSE41();
8556 if (IsBlendSupported)
8557 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8558 Subtarget, DAG))
8559 return Blend;
8561 // Use dedicated unpack instructions for masks that match their pattern.
8562 if (SDValue V =
8563 lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
8564 return V;
8566 // Try to use byte rotation instructions.
8567 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8568 if (Subtarget->hasSSSE3())
8569 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8570 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8571 return Rotate;
8573 // If we have direct support for blends, we should lower by decomposing into
8574 // a permute. That will be faster than the domain cross.
8575 if (IsBlendSupported)
8576 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8577 Mask, DAG);
8579 // We implement this with SHUFPD which is pretty lame because it will likely
8580 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8581 // However, all the alternatives are still more cycles and newer chips don't
8582 // have this problem. It would be really nice if x86 had better shuffles here.
8583 V1 = DAG.getBitcast(MVT::v2f64, V1);
8584 V2 = DAG.getBitcast(MVT::v2f64, V2);
8585 return DAG.getBitcast(MVT::v2i64,
8586 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8589 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8591 /// This is used to disable more specialized lowerings when the shufps lowering
8592 /// will happen to be efficient.
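/// For example (illustrative): {0, 1, 4, 5} can be a single SHUFPS, while
/// {0, 4, 1, 5} cannot, as each output half would then need elements from
/// both inputs.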
8593 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8594 // This routine only handles 128-bit shufps.
8595 assert(Mask.size() == 4 && "Unsupported mask size!");
8597 // To lower with a single SHUFPS we need to have the low half and high half
8598 // each requiring a single input.
8599 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8601 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8607 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8609 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8610 /// It makes no assumptions about whether this is the *best* lowering, it simply
8612 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8613 ArrayRef<int> Mask, SDValue V1,
8614 SDValue V2, SelectionDAG &DAG) {
8615 SDValue LowV = V1, HighV = V2;
8616 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8619 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8621 if (NumV2Elements == 1) {
8623 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8626 // Compute the index adjacent to V2Index and in the same half by toggling
8627 // the low bit.
8628 int V2AdjIndex = V2Index ^ 1;
8630 if (Mask[V2AdjIndex] == -1) {
8631 // Handles all the cases where we have a single V2 element and an undef.
8632 // This will only ever happen in the high lanes because we commute the
8633 // vector otherwise.
8635 std::swap(LowV, HighV);
8636 NewMask[V2Index] -= 4;
8638 // Handle the case where the V2 element ends up adjacent to a V1 element.
8639 // To make this work, blend them together as the first step.
8640 int V1Index = V2AdjIndex;
8641 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8642 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8643 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
8645 // Now proceed to reconstruct the final blend as we have the necessary
8646 // high or low half formed.
8653 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8654 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8656 } else if (NumV2Elements == 2) {
8657 if (Mask[0] < 4 && Mask[1] < 4) {
8658 // Handle the easy case where we have V1 in the low lanes and V2 in the
8662 } else if (Mask[2] < 4 && Mask[3] < 4) {
8663 // We also handle the reversed case because this utility may get called
8664 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8665 // arrange things in the right direction.
8671 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8672 // trying to place elements directly, just blend them and set up the final
8673 // shuffle to place them.
8675 // The first two blend mask elements are for V1, the second two are for
8676 // V2.
8677 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8678 Mask[2] < 4 ? Mask[2] : Mask[3],
8679 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8680 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8681 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8682 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
8684 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8685 // the SHUFPS.
8687 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8688 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8689 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8690 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8693 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8694 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
8697 /// \brief Lower 4-lane 32-bit floating point shuffles.
8699 /// Uses instructions exclusively from the floating point unit to minimize
8700 /// domain crossing penalties, as these are sufficient to implement all v4f32
8701 /// shuffles.
8702 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8703 const X86Subtarget *Subtarget,
8704 SelectionDAG &DAG) {
8705 SDLoc DL(Op);
8706 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8707 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8708 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8709 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8710 ArrayRef<int> Mask = SVOp->getMask();
8711 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8714 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8716 if (NumV2Elements == 0) {
8717 // Check for being able to broadcast a single element.
8718 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f32, V1,
8719 Mask, Subtarget, DAG))
8720 return Broadcast;
8722 // Use even/odd duplicate instructions for masks that match their pattern.
8723 if (Subtarget->hasSSE3()) {
8724 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
8725 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8726 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
8727 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8730 if (Subtarget->hasAVX()) {
8731 // If we have AVX, we can use VPERMILPS which will allow folding a load
8732 // into the shuffle.
8733 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8734 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
8737 // Otherwise, use a straight shuffle of a single input vector. We pass the
8738 // input vector to both operands to simulate this with a SHUFPS.
8739 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8740 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
8743 // There are special ways we can lower some single-element blends. However, we
8744 // have custom ways we can lower more complex single-element blends below that
8745 // we defer to if both this and BLENDPS fail to match, so restrict this to
8746 // when the V2 input is targeting element 0 of the mask -- that is the fast
8747 // case here.
8748 if (NumV2Elements == 1 && Mask[0] >= 4)
8749 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4f32, V1, V2,
8750 Mask, Subtarget, DAG))
8751 return V;
8753 if (Subtarget->hasSSE41()) {
8754 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8755 Subtarget, DAG))
8756 return Blend;
8758 // Use INSERTPS if we can complete the shuffle efficiently.
8759 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8760 return V;
8762 if (!isSingleSHUFPSMask(Mask))
8763 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8764 DL, MVT::v4f32, V1, V2, Mask, DAG))
8765 return BlendPerm;
8766 }
8768 // Use dedicated unpack instructions for masks that match their pattern.
8769 if (SDValue V =
8770 lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
8771 return V;
8773 // Otherwise fall back to a SHUFPS lowering strategy.
8774 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8777 /// \brief Lower 4-lane i32 vector shuffles.
8779 /// We try to handle these with integer-domain shuffles where we can, but for
8780 /// blends we use the floating point domain blend instructions.
8781 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8782 const X86Subtarget *Subtarget,
8783 SelectionDAG &DAG) {
8784 SDLoc DL(Op);
8785 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8786 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8787 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8788 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8789 ArrayRef<int> Mask = SVOp->getMask();
8790 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8792 // Whenever we can lower this as a zext, that instruction is strictly faster
8793 // than any alternative. It also allows us to fold memory operands into the
8794 // shuffle in many cases.
8795 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8796 Mask, Subtarget, DAG))
8797 return ZExt;
8800 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8802 if (NumV2Elements == 0) {
8803 // Check for being able to broadcast a single element.
8804 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i32, V1,
8805 Mask, Subtarget, DAG))
8806 return Broadcast;
8808 // Straight shuffle of a single input vector. For everything from SSE2
8809 // onward this has a single fast instruction with no scary immediates.
8810 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8811 // but we aren't actually going to use the UNPCK instruction because doing
8812 // so prevents folding a load into this instruction or making a copy.
8813 const int UnpackLoMask[] = {0, 0, 1, 1};
8814 const int UnpackHiMask[] = {2, 2, 3, 3};
8815 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
8816 Mask = UnpackLoMask;
8817 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
8818 Mask = UnpackHiMask;
8820 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8821 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
8824 // Try to use shift instructions.
8825 if (SDValue Shift =
8826 lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
8827 return Shift;
8829 // There are special ways we can lower some single-element blends.
8830 if (NumV2Elements == 1)
8831 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4i32, V1, V2,
8832 Mask, Subtarget, DAG))
8833 return V;
8835 // We have different paths for blend lowering, but they all must use the
8836 // *exact* same predicate.
8837 bool IsBlendSupported = Subtarget->hasSSE41();
8838 if (IsBlendSupported)
8839 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8840 Subtarget, DAG))
8841 return Blend;
8843 if (SDValue Masked =
8844 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8845 return Masked;
8847 // Use dedicated unpack instructions for masks that match their pattern.
8848 if (SDValue V =
8849 lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
8850 return V;
8852 // Try to use byte rotation instructions.
8853 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8854 if (Subtarget->hasSSSE3())
8855 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8856 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8857 return Rotate;
8859 // If we have direct support for blends, we should lower by decomposing into
8860 // a permute. That will be faster than the domain cross.
8861 if (IsBlendSupported)
8862 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
8863 Mask, DAG);
8865 // Try to lower by permuting the inputs into an unpack instruction.
8866 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1,
8867 V2, Mask, DAG))
8868 return Unpack;
8870 // We implement this with SHUFPS because it can blend from two vectors.
8871 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8872 // up the inputs, bypassing domain shift penalties that we would incur if
8873 // we directly used PSHUFD on Nehalem and older. For newer chips, this
8874 // isn't needed.
8875 return DAG.getBitcast(
8877 DAG.getVectorShuffle(MVT::v4f32, DL, DAG.getBitcast(MVT::v4f32, V1),
8878 DAG.getBitcast(MVT::v4f32, V2), Mask));
8881 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8882 /// shuffle lowering, and the most complex part.
8884 /// The lowering strategy is to try to form pairs of input lanes which are
8885 /// targeted at the same half of the final vector, and then use a dword shuffle
8886 /// to place them onto the right half, and finally unpack the paired lanes into
8887 /// their final position.
8889 /// The exact breakdown of how to form these dword pairs and align them on the
8890 /// correct sides is really tricky. See the comments within the function for
8891 /// more of the details.
8893 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
8894 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
8895 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
8896 /// vector, form the analogous 128-bit 8-element Mask.
8897 static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
8898 SDLoc DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
8899 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8900 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
8901 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
8903 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
8904 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8905 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8907 SmallVector<int, 4> LoInputs;
8908 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8909 [](int M) { return M >= 0; });
8910 std::sort(LoInputs.begin(), LoInputs.end());
8911 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8912 SmallVector<int, 4> HiInputs;
8913 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8914 [](int M) { return M >= 0; });
8915 std::sort(HiInputs.begin(), HiInputs.end());
8916 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8917 int NumLToL =
8918 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8919 int NumHToL = LoInputs.size() - NumLToL;
8920 int NumLToH =
8921 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8922 int NumHToH = HiInputs.size() - NumLToH;
8923 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8924 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8925 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8926 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
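// For example (illustrative): the mask {0, 5, 1, 7, 4, 2, 6, 3} splits into
// LToL = {0, 1}, HToL = {5, 7}, LToH = {2, 3} and HToH = {4, 6}.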
8928 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8929 // such inputs we can swap two of the dwords across the half mark and end up
8930 // with <=2 inputs to each half in each half. Once there, we can fall through
8931 // to the generic code below. For example:
8933 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8934 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8936 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8937 // and an existing 2-into-2 on the other half. In this case we may have to
8938 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8939 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8940 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8941 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8942 // half than the one we target for fixing) will be fixed when we re-enter this
8943 // path. We will also combine away any sequence of PSHUFD instructions that
8944 // result into a single instruction. Here is an example of the tricky case:
8946 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8947 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8949 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8951 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8952 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8954 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8955 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8957 // The result is fine to be handled by the generic logic.
8958 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8959 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8960 int AOffset, int BOffset) {
8961 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8962 "Must call this with A having 3 or 1 inputs from the A half.");
8963 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8964 "Must call this with B having 1 or 3 inputs from the B half.");
8965 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8966 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8968 bool ThreeAInputs = AToAInputs.size() == 3;
8970 // Compute the index of dword with only one word among the three inputs in
8971 // a half by taking the sum of the half with three inputs and subtracting
8972 // the sum of the actual three inputs. The difference is the remaining
8973 // slot.
8974 int ADWord, BDWord;
8975 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
8976 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
8977 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
8978 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
8979 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
8980 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8981 int TripleNonInputIdx =
8982 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
8983 TripleDWord = TripleNonInputIdx / 2;
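// For example (illustrative): if the three inputs in the low half are
// {0, 1, 3}, TripleInputSum is 6, so the missing word is index 2 and the
// one-input dword is dword 1.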
8985 // We use xor with one to compute the adjacent DWord to whichever one the
8987 OneInputDWord = (OneInput / 2) ^ 1;
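// For example: a lone input at word 5 lives in dword 2, so the dword we
// pair it with is dword 3.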
8989 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
8990 // and BToA inputs. If there is also such a problem with the BToB and AToB
8991 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
8992 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
8993 // is essential that we don't *create* a 3<-1 as then we might oscillate.
8994 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
8995 // Compute how many inputs will be flipped by swapping these DWords. We
8996 // need to balance this to ensure we don't form a 3-1 shuffle in the
8997 // other half.
8999 int NumFlippedAToBInputs =
9000 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9001 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9002 int NumFlippedBToBInputs =
9003 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9004 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9005 if ((NumFlippedAToBInputs == 1 &&
9006 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9007 (NumFlippedBToBInputs == 1 &&
9008 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9009 // We choose whether to fix the A half or B half based on whether that
9010 // half has zero flipped inputs. At zero, we may not be able to fix it
9011 // with that half. We also bias towards fixing the B half because that
9012 // will more commonly be the high half, and we have to bias one way.
9013 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9014 ArrayRef<int> Inputs) {
9015 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9016 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9017 PinnedIdx ^ 1) != Inputs.end();
9018 // Determine whether the free index is in the flipped dword or the
9019 // unflipped dword based on where the pinned index is. We use this bit
9020 // in an xor to conditionally select the adjacent dword.
9021 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9022 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9023 FixFreeIdx) != Inputs.end();
9024 if (IsFixIdxInput == IsFixFreeIdxInput)
9025 FixFreeIdx += 1;
9026 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9027 FixFreeIdx) != Inputs.end();
9028 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9029 "We need to be changing the number of flipped inputs!");
9030 int PSHUFHalfMask[] = {0, 1, 2, 3};
9031 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9032 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9034 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
9036 for (int &M : Mask)
9037 if (M != -1 && M == FixIdx)
9038 M = FixFreeIdx;
9039 else if (M != -1 && M == FixFreeIdx)
9040 M = FixIdx;
9041 };
9042 if (NumFlippedBToBInputs != 0) {
9043 int BPinnedIdx =
9044 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9045 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9047 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9048 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
9049 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9054 int PSHUFDMask[] = {0, 1, 2, 3};
9055 PSHUFDMask[ADWord] = BDWord;
9056 PSHUFDMask[BDWord] = ADWord;
9057 V = DAG.getBitcast(
9058 VT,
9059 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
9060 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
9062 // Adjust the mask to match the new locations of A and B.
9063 for (int &M : Mask)
9064 if (M != -1 && M/2 == ADWord)
9065 M = 2 * BDWord + M % 2;
9066 else if (M != -1 && M/2 == BDWord)
9067 M = 2 * ADWord + M % 2;
9069 // Recurse back into this routine to re-compute state now that this isn't
9070 // a 3 and 1 problem.
9071 return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
9072 DAG);
9074 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9075 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9076 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9077 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9079 // At this point there are at most two inputs to the low and high halves from
9080 // each half. That means the inputs can always be grouped into dwords and
9081 // those dwords can then be moved to the correct half with a dword shuffle.
9082 // We use at most one low and one high word shuffle to collect these paired
9083 // inputs into dwords, and finally a dword shuffle to place them.
9084 int PSHUFLMask[4] = {-1, -1, -1, -1};
9085 int PSHUFHMask[4] = {-1, -1, -1, -1};
9086 int PSHUFDMask[4] = {-1, -1, -1, -1};
9088 // First fix the masks for all the inputs that are staying in their
9089 // original halves. This will then dictate the targets of the cross-half
9091 auto fixInPlaceInputs =
9092 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9093 MutableArrayRef<int> SourceHalfMask,
9094 MutableArrayRef<int> HalfMask, int HalfOffset) {
9095 if (InPlaceInputs.empty())
9096 return;
9097 if (InPlaceInputs.size() == 1) {
9098 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9099 InPlaceInputs[0] - HalfOffset;
9100 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9101 return;
9102 }
9103 if (IncomingInputs.empty()) {
9104 // Just fix all of the in place inputs.
9105 for (int Input : InPlaceInputs) {
9106 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9107 PSHUFDMask[Input / 2] = Input / 2;
9108 }
9109 return;
9110 }
9112 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9113 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9114 InPlaceInputs[0] - HalfOffset;
9115 // Put the second input next to the first so that they are packed into
9116 // a dword. We find the adjacent index by toggling the low bit.
9117 int AdjIndex = InPlaceInputs[0] ^ 1;
9118 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9119 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9120 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9121 };
9122 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9123 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9125 // Now gather the cross-half inputs and place them into a free dword of
9126 // their target half.
9127 // FIXME: This operation could almost certainly be simplified dramatically to
9128 // look more like the 3-1 fixing operation.
9129 auto moveInputsToRightHalf = [&PSHUFDMask](
9130 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9131 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9132 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9133 int DestOffset) {
9134 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9135 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9136 };
9137 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9138 int Word) {
9139 int LowWord = Word & ~1;
9140 int HighWord = Word | 1;
9141 return isWordClobbered(SourceHalfMask, LowWord) ||
9142 isWordClobbered(SourceHalfMask, HighWord);
9143 };
9145 if (IncomingInputs.empty())
9146 return;
9148 if (ExistingInputs.empty()) {
9149 // Map any dwords with inputs from them into the right half.
9150 for (int Input : IncomingInputs) {
9151 // If the source half mask maps over the inputs, turn those into
9152 // swaps and use the swapped lane.
9153 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9154 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9155 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9156 Input - SourceOffset;
9157 // We have to swap the uses in our half mask in one sweep.
9158 for (int &M : HalfMask)
9159 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9160 M = Input;
9161 else if (M == Input)
9162 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9163 } else {
9164 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9165 Input - SourceOffset &&
9166 "Previous placement doesn't match!");
9167 }
9168 // Note that this correctly re-maps both when we do a swap and when
9169 // we observe the other side of the swap above. We rely on that to
9170 // avoid swapping the members of the input list directly.
9171 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9172 }
9174 // Map the input's dword into the correct half.
9175 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9176 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9177 else
9178 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9179 Input / 2 &&
9180 "Previous placement doesn't match!");
9181 }
9183 // And just directly shift any other-half mask elements to be same-half
9184 // as we will have mirrored the dword containing the element into the
9185 // same position within that half.
9186 for (int &M : HalfMask)
9187 if (M >= SourceOffset && M < SourceOffset + 4) {
9188 M = M - SourceOffset + DestOffset;
9189 assert(M >= 0 && "This should never wrap below zero!");
9190 }
9191 return;
9192 }
9194 // Ensure we have the input in a viable dword of its current half. This
9195 // is particularly tricky because the original position may be clobbered
9196 // by inputs being moved and *staying* in that half.
9197 if (IncomingInputs.size() == 1) {
9198 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9199 int InputFixed = std::find(std::begin(SourceHalfMask),
9200 std::end(SourceHalfMask), -1) -
9201 std::begin(SourceHalfMask) + SourceOffset;
9202 SourceHalfMask[InputFixed - SourceOffset] =
9203 IncomingInputs[0] - SourceOffset;
9204 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9205 InputFixed);
9206 IncomingInputs[0] = InputFixed;
9207 }
9208 } else if (IncomingInputs.size() == 2) {
9209 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9210 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9211 // We have two non-adjacent or clobbered inputs we need to extract from
9212 // the source half. To do this, we need to map them into some adjacent
9213 // dword slot in the source mask.
9214 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9215 IncomingInputs[1] - SourceOffset};
9217 // If there is a free slot in the source half mask adjacent to one of
9218 // the inputs, place the other input in it. We use (Index XOR 1) to
9219 // compute an adjacent index.
9220 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9221 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9222 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9223 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9224 InputsFixed[1] = InputsFixed[0] ^ 1;
9225 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9226 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9227 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9228 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9229 InputsFixed[0] = InputsFixed[1] ^ 1;
9230 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9231 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9232 // The two inputs are in the same DWord but it is clobbered and the
9233 // adjacent DWord isn't used at all. Move both inputs to the free
9234 // slot.
9235 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9236 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9237 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9238 InputsFixed[1] = InputsFixed[0] + 1;
9239 } else {
9240 // The only way we hit this point is if there is no clobbering
9241 // (because there are no off-half inputs to this half) and there is no
9242 // free slot adjacent to one of the inputs. In this case, we have to
9243 // swap an input with a non-input.
9244 for (int i = 0; i < 4; ++i)
9245 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9246 "We can't handle any clobbers here!");
9247 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9248 "Cannot have adjacent inputs here!");
9250 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9251 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9253 // We also have to update the final source mask in this case because
9254 // it may need to undo the above swap.
9255 for (int &M : FinalSourceHalfMask)
9256 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9257 M = InputsFixed[1] + SourceOffset;
9258 else if (M == InputsFixed[1] + SourceOffset)
9259 M = (InputsFixed[0] ^ 1) + SourceOffset;
9261 InputsFixed[1] = InputsFixed[0] ^ 1;
9262 }
9264 // Point everything at the fixed inputs.
9265 for (int &M : HalfMask)
9266 if (M == IncomingInputs[0])
9267 M = InputsFixed[0] + SourceOffset;
9268 else if (M == IncomingInputs[1])
9269 M = InputsFixed[1] + SourceOffset;
9271 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9272 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9273 }
9274 } else {
9275 llvm_unreachable("Unhandled input size!");
9276 }
9278 // Now hoist the DWord down to the right half.
9279 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9280 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9281 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9282 for (int &M : HalfMask)
9283 for (int Input : IncomingInputs)
9284 if (M == Input)
9285 M = FreeDWord * 2 + Input % 2;
9286 };
9287 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9288 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9289 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9290 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9292 // Now enact all the shuffles we've computed to move the inputs into their
9293 // target half.
9294 if (!isNoopShuffleMask(PSHUFLMask))
9295 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
9296 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
9297 if (!isNoopShuffleMask(PSHUFHMask))
9298 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
9299 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
9300 if (!isNoopShuffleMask(PSHUFDMask))
9301 V = DAG.getBitcast(
9302 VT,
9303 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
9304 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
9306 // At this point, each half should contain all its inputs, and we can then
9307 // just shuffle them into their final position.
9308 assert(std::count_if(LoMask.begin(), LoMask.end(),
9309 [](int M) { return M >= 4; }) == 0 &&
9310 "Failed to lift all the high half inputs to the low mask!");
9311 assert(std::count_if(HiMask.begin(), HiMask.end(),
9312 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9313 "Failed to lift all the low half inputs to the high mask!");
9315 // Do a half shuffle for the low mask.
9316 if (!isNoopShuffleMask(LoMask))
9317 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
9318 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
9320 // Do a half shuffle with the high mask after shifting its values down.
9321 for (int &M : HiMask)
9322 if (M >= 0)
9323 M -= 4;
9324 if (!isNoopShuffleMask(HiMask))
9325 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
9326 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
9328 return V;
9329 }
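// As an illustrative walk-through of the routine above: the single-input
// v8i16 mask <4,5,6,7,0,1,2,3> has every input crossing halves but already
// grouped into dwords, so PSHUFLMask and PSHUFHMask stay no-ops,
// moveInputsToRightHalf fills PSHUFDMask with {2,3,0,1}, and the final
// LoMask/HiMask become no-ops; the whole shuffle lowers to a single PSHUFD
// with immediate 0x4E (0b01001110).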
9331 /// \brief Helper to form a PSHUFB-based shuffle+blend.
9332 static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
9333 SDValue V2, ArrayRef<int> Mask,
9334 SelectionDAG &DAG, bool &V1InUse,
9335 bool &V2InUse) {
9336 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9337 SDValue V1Mask[16];
9338 SDValue V2Mask[16];
9339 V1InUse = false;
9340 V2InUse = false;
9342 int Size = Mask.size();
9343 int Scale = 16 / Size;
9344 for (int i = 0; i < 16; ++i) {
9345 if (Mask[i / Scale] == -1) {
9346 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9347 } else {
9348 const int ZeroMask = 0x80;
9349 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
9350 : ZeroMask;
9351 int V2Idx = Mask[i / Scale] < Size
9352 ? ZeroMask
9353 : (Mask[i / Scale] - Size) * Scale + i % Scale;
9354 if (Zeroable[i / Scale])
9355 V1Idx = V2Idx = ZeroMask;
9356 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
9357 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
9358 V1InUse |= (ZeroMask != V1Idx);
9359 V2InUse |= (ZeroMask != V2Idx);
9360 }
9361 }
9363 if (V1InUse)
9364 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9365 DAG.getBitcast(MVT::v16i8, V1),
9366 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9367 if (V2InUse)
9368 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9369 DAG.getBitcast(MVT::v16i8, V2),
9370 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9372 // If we need shuffled inputs from both, blend the two.
9373 SDValue V;
9374 if (V1InUse && V2InUse)
9375 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9376 else
9377 V = V1InUse ? V1 : V2;
9379 // Cast the result back to the correct type.
9380 return DAG.getBitcast(VT, V);
9381 }
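// As an illustration of the control-vector construction: with VT = v8i16 and
// Mask = <0,8,2,10,4,12,6,14>, Scale = 2 and the byte-level controls become
//   V1Mask = {0,1,0x80,0x80, 4,5,0x80,0x80, 8,9,0x80,0x80, 12,13,0x80,0x80}
//   V2Mask = {0x80,0x80,0,1, 0x80,0x80,4,5, 0x80,0x80,8,9, 0x80,0x80,12,13}
// where 0x80 zeroes a lane, so each PSHUFB clears the other input's bytes and
// the OR performs the blend.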
9383 /// \brief Generic lowering of 8-lane i16 shuffles.
9385 /// This handles both single-input shuffles and combined shuffle/blends with
9386 /// two inputs. The single input shuffles are immediately delegated to
9387 /// a dedicated lowering routine.
9389 /// The blends are lowered in one of three fundamental ways. If there are few
9390 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9391 /// of the input is significantly cheaper when lowered as an interleaving of
9392 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9393 /// halves of the inputs separately (making them have relatively few inputs)
9394 /// and then concatenate them.
9395 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9396 const X86Subtarget *Subtarget,
9397 SelectionDAG &DAG) {
9398 SDLoc DL(Op);
9399 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9400 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9401 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9402 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9403 ArrayRef<int> OrigMask = SVOp->getMask();
9404 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9405 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9406 MutableArrayRef<int> Mask(MaskStorage);
9408 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9410 // Whenever we can lower this as a zext, that instruction is strictly faster
9411 // than any alternative.
9412 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9413 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9414 return ZExt;
9416 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9417 (void)isV1;
9418 auto isV2 = [](int M) { return M >= 8; };
9420 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9422 if (NumV2Inputs == 0) {
9423 // Check for being able to broadcast a single element.
9424 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i16, V1,
9425 Mask, Subtarget, DAG))
9428 // Try to use shift instructions.
9429 if (SDValue Shift =
9430 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, DAG))
9431 return Shift;
9433 // Use dedicated unpack instructions for masks that match their pattern.
9434 if (SDValue V =
9435 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
9436 return V;
9438 // Try to use byte rotation instructions.
9439 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
9440 Mask, Subtarget, DAG))
9441 return Rotate;
9443 return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1, Mask,
9444 Subtarget, DAG);
9445 }
9447 assert(std::any_of(Mask.begin(), Mask.end(), isV1) &&
9448 "All single-input shuffles should be canonicalized to be V1-input "
9449 "shuffles.");
9451 // Try to use shift instructions.
9452 if (SDValue Shift =
9453 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
9454 return Shift;
9456 // See if we can use SSE4A Extraction / Insertion.
9457 if (Subtarget->hasSSE4A())
9458 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask, DAG))
9459 return V;
9461 // There are special ways we can lower some single-element blends.
9462 if (NumV2Inputs == 1)
9463 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v8i16, V1, V2,
9464 Mask, Subtarget, DAG))
9465 return V;
9467 // We have different paths for blend lowering, but they all must use the
9468 // *exact* same predicate.
9469 bool IsBlendSupported = Subtarget->hasSSE41();
9470 if (IsBlendSupported)
9471 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9472 Subtarget, DAG))
9473 return Blend;
9475 if (SDValue Masked =
9476 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9477 return Masked;
9479 // Use dedicated unpack instructions for masks that match their pattern.
9480 if (SDValue V =
9481 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
9482 return V;
9484 // Try to use byte rotation instructions.
9485 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9486 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9487 return Rotate;
9489 if (SDValue BitBlend =
9490 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
9491 return BitBlend;
9493 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
9494 V2, Mask, DAG))
9495 return Unpack;
9497 // If we can't directly blend but can use PSHUFB, that will be better as it
9498 // can both shuffle and set up the inefficient blend.
9499 if (!IsBlendSupported && Subtarget->hasSSSE3()) {
9500 bool V1InUse, V2InUse;
9501 return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
9502 V1InUse, V2InUse);
9503 }
9505 // We can always bit-blend if we have to so the fallback strategy is to
9506 // decompose into single-input permutes and blends.
9507 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9508 Mask, DAG);
9509 }
9511 /// \brief Check whether a compaction lowering can be done by dropping even
9512 /// elements and compute how many times even elements must be dropped.
9514 /// This handles shuffles which take every Nth element where N is a power of
9515 /// two. Example shuffle masks:
9517 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9518 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9519 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9520 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9521 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9522 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9524 /// Any of these lanes can of course be undef.
9526 /// This routine only supports N <= 3.
9527 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9528 /// for larger N.
9530 /// \returns N above, or the number of times even elements must be dropped if
9531 /// there is such a number. Otherwise returns zero.
9532 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9533 // Figure out whether we're looping over two inputs or just one.
9534 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9536 // The modulus for the shuffle vector entries is based on whether this is
9537 // a single input or not.
9538 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9539 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9540 "We should only be called with masks with a power-of-2 size!");
9542 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9544 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9545 // and 2^3 simultaneously. This is because we may have ambiguity with
9546 // partially undef inputs.
9547 bool ViableForN[3] = {true, true, true};
9549 for (int i = 0, e = Mask.size(); i < e; ++i) {
9550 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9551 // want.
9552 if (Mask[i] == -1)
9553 continue;
9555 bool IsAnyViable = false;
9556 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9557 if (ViableForN[j]) {
9558 uint64_t N = j + 1;
9560 // The shuffle mask must be equal to (i * 2^N) % M.
9561 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9562 IsAnyViable = true;
9563 else
9564 ViableForN[j] = false;
9565 }
9566 // Early exit if we exhaust the possible powers of two.
9567 if (!IsAnyViable)
9568 break;
9569 }
9571 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9572 if (ViableForN[j])
9573 return j + 1;
9575 // Return 0 as there is no viable power of two.
9576 return 0;
9577 }
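// As an illustration of the check above, a minimal standalone model for a
// power-of-two sized, fully-defined, single-input mask. This is a sketch
// only (a hypothetical helper, not used by the lowering); the real routine
// also handles undef lanes and two-input masks.
static int modelEvenDropStride(ArrayRef<int> Mask) {
  int M = (int)Mask.size();
  for (int N = 1; N <= 3; ++N) {
    // With a single input, the mask entry for lane i must be (i << N) % M.
    bool Viable = true;
    for (int i = 0; i != M; ++i)
      if (Mask[i] != ((i << N) & (M - 1)))
        Viable = false;
    if (Viable)
      return N;
  }
  return 0;
}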
9579 /// \brief Generic lowering of v16i8 shuffles.
9581 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9582 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9583 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9584 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9585 /// back together.
9586 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9587 const X86Subtarget *Subtarget,
9588 SelectionDAG &DAG) {
9589 SDLoc DL(Op);
9590 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9591 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9592 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9593 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9594 ArrayRef<int> Mask = SVOp->getMask();
9595 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9597 // Try to use shift instructions.
9598 if (SDValue Shift =
9599 lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
9600 return Shift;
9602 // Try to use byte rotation instructions.
9603 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9604 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9605 return Rotate;
9607 // Try to use a zext lowering.
9608 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9609 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9610 return ZExt;
9612 // See if we can use SSE4A Extraction / Insertion.
9613 if (Subtarget->hasSSE4A())
9614 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask, DAG))
9615 return V;
9617 int NumV2Elements =
9618 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9620 // For single-input shuffles, there are some nicer lowering tricks we can use.
9621 if (NumV2Elements == 0) {
9622 // Check for being able to broadcast a single element.
9623 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i8, V1,
9624 Mask, Subtarget, DAG))
9625 return Broadcast;
9627 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9628 // Notably, this handles splat and partial-splat shuffles more efficiently.
9629 // However, it only makes sense if the pre-duplication shuffle simplifies
9630 // things significantly. Currently, this means we need to be able to
9631 // express the pre-duplication shuffle as an i16 shuffle.
9633 // FIXME: We should check for other patterns which can be widened into an
9634 // i16 shuffle as well.
9635 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9636 for (int i = 0; i < 16; i += 2)
9637 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9638 return false;
9640 return true;
9641 };
9642 auto tryToWidenViaDuplication = [&]() -> SDValue {
9643 if (!canWidenViaDuplication(Mask))
9644 return SDValue();
9645 SmallVector<int, 4> LoInputs;
9646 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9647 [](int M) { return M >= 0 && M < 8; });
9648 std::sort(LoInputs.begin(), LoInputs.end());
9649 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9650 LoInputs.end());
9651 SmallVector<int, 4> HiInputs;
9652 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9653 [](int M) { return M >= 8; });
9654 std::sort(HiInputs.begin(), HiInputs.end());
9655 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9656 HiInputs.end());
9658 bool TargetLo = LoInputs.size() >= HiInputs.size();
9659 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9660 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9662 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9663 SmallDenseMap<int, int, 8> LaneMap;
9664 for (int I : InPlaceInputs) {
9665 PreDupI16Shuffle[I/2] = I/2;
9666 LaneMap[I] = I;
9667 }
9668 int j = TargetLo ? 0 : 4, je = j + 4;
9669 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9670 // Check if j is already a shuffle of this input. This happens when
9671 // there are two adjacent bytes after we move the low one.
9672 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9673 // If we haven't yet mapped the input, search for a slot into which
9675 while (j < je && PreDupI16Shuffle[j] != -1)
9676 ++j;
9678 if (j == je)
9679 // We can't place the inputs into a single half with a simple i16
9680 // shuffle, so bail.
9681 return SDValue();
9682 // Map this input with the i16 shuffle.
9683 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9684 }
9686 // Update the lane map based on the mapping we ended up with.
9687 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9688 }
9689 V1 = DAG.getBitcast(
9690 MVT::v16i8,
9691 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
9692 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9694 // Unpack the bytes to form the i16s that will be shuffled into place.
9695 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9696 MVT::v16i8, V1, V1);
9698 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9699 for (int i = 0; i < 16; ++i)
9700 if (Mask[i] != -1) {
9701 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9702 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9703 if (PostDupI16Shuffle[i / 2] == -1)
9704 PostDupI16Shuffle[i / 2] = MappedMask;
9705 else
9706 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9707 "Conflicting entries in the original shuffle!");
9708 }
9709 return DAG.getBitcast(
9710 MVT::v16i8,
9711 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
9712 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9713 };
9714 if (SDValue V = tryToWidenViaDuplication())
9715 return V;
9716 }
9718 if (SDValue Masked =
9719 lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask, DAG))
9720 return Masked;
9722 // Use dedicated unpack instructions for masks that match their pattern.
9723 if (SDValue V =
9724 lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
9725 return V;
9727 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9728 // with PSHUFB. It is important to do this before we attempt to generate any
9729 // blends but after all of the single-input lowerings. If the single input
9730 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9731 // want to preserve that and we can DAG combine any longer sequences into
9732 // a PSHUFB in the end. But once we start blending from multiple inputs,
9733 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9734 // and there are *very* few patterns that would actually be faster than the
9735 // PSHUFB approach because of its ability to zero lanes.
9737 // FIXME: The only exceptions to the above are blends which are exact
9738 // interleavings with direct instructions supporting them. We currently don't
9739 // handle those well here.
9740 if (Subtarget->hasSSSE3()) {
9741 bool V1InUse = false;
9742 bool V2InUse = false;
9744 SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
9745 DAG, V1InUse, V2InUse);
9747 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
9748 // do so. This avoids using them to handle blends-with-zero which is
9749 // important as a single pshufb is significantly faster for that.
9750 if (V1InUse && V2InUse) {
9751 if (Subtarget->hasSSE41())
9752 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
9753 Mask, Subtarget, DAG))
9754 return Blend;
9756 // We can use an unpack to do the blending rather than an or in some
9757 // cases. Even though the or may be (very minorly) more efficient, we
9758 // prefer this lowering because there are common cases where part of
9759 // the complexity of the shuffles goes away when we do the final blend as
9760 // a single instruction.
9761 // FIXME: It might be worth trying to detect if the unpack-feeding
9762 // shuffles will both be pshufb, in which case we shouldn't bother with
9763 // the unpack.
9764 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
9765 DL, MVT::v16i8, V1, V2, Mask, DAG))
9766 return Unpack;
9767 }
9769 return PSHUFB;
9770 }
9772 // There are special ways we can lower some single-element blends.
9773 if (NumV2Elements == 1)
9774 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v16i8, V1, V2,
9775 Mask, Subtarget, DAG))
9776 return V;
9778 if (SDValue BitBlend =
9779 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
9780 return BitBlend;
9782 // Check whether a compaction lowering can be done. This handles shuffles
9783 // which take every Nth element for some even N. See the helper function for
9784 // details.
9786 // We special case these as they can be particularly efficiently handled with
9787 // the PACKUSWB instruction on x86 and they show up in common patterns of
9788 // rearranging bytes to truncate wide elements.
9789 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9790 // NumEvenDrops is N where the element stride is 2^N. Another way of
9791 // thinking about it is that we need to drop the even elements this many
9792 // times to get the original input.
9793 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9795 // First we need to zero all the dropped bytes.
9796 assert(NumEvenDrops <= 3 &&
9797 "No support for dropping even elements more than 3 times.");
9798 // We use the mask type to pick which bytes are preserved based on how many
9799 // elements are dropped.
9800 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9801 SDValue ByteClearMask = DAG.getBitcast(
9802 MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
9803 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9804 if (!IsSingleInput)
9805 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9807 // Now pack things back together.
9808 V1 = DAG.getBitcast(MVT::v8i16, V1);
9809 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
9810 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9811 for (int i = 1; i < NumEvenDrops; ++i) {
9812 Result = DAG.getBitcast(MVT::v8i16, Result);
9813 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9814 }
9816 return Result;
9817 }
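// As an illustration: for a stride-4 mask (NumEvenDrops == 2) the 0xFF splat
// is built as v4i32, so the AND keeps bytes 0,4,8,12 of each input; the
// first PACKUS leaves those bytes interleaved with zeros and the second
// PACKUS compacts them into the low elements, realizing the pattern
// <0,4,8,12,16,20,24,28,...>.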
9819 // Handle multi-input cases by blending single-input shuffles.
9820 if (NumV2Elements > 0)
9821 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
9822 Mask, DAG);
9824 // The fallback path for single-input shuffles widens this into two v8i16
9825 // vectors with unpacks, shuffles those, and then pulls them back together
9826 // with a pack.
9827 SDValue V = V1;
9829 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9830 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9831 for (int i = 0; i < 16; ++i)
9832 if (Mask[i] >= 0)
9833 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
9835 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9837 SDValue VLoHalf, VHiHalf;
9838 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9839 // them out and avoid using UNPCK{L,H} to extract the elements of V as
9840 // i16s.
9841 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
9842 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9843 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
9844 [](int M) { return M >= 0 && M % 2 == 1; })) {
9845 // Use a mask to drop the high bytes.
9846 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
9847 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
9848 DAG.getConstant(0x00FF, DL, MVT::v8i16));
9850 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
9851 VHiHalf = DAG.getUNDEF(MVT::v8i16);
9853 // Squash the masks to point directly into VLoHalf.
9854 for (int &M : LoBlendMask)
9855 if (M >= 0)
9856 M /= 2;
9857 for (int &M : HiBlendMask)
9858 if (M >= 0)
9859 M /= 2;
9860 } else {
9862 // VHiHalf so that we can blend them as i16s.
9863 VLoHalf = DAG.getBitcast(
9864 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9865 VHiHalf = DAG.getBitcast(
9866 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
9867 }
9869 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
9870 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
9872 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
9873 }
9875 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9877 /// This routine breaks down the specific type of 128-bit shuffle and
9878 /// dispatches to the lowering routines accordingly.
9879 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9880 MVT VT, const X86Subtarget *Subtarget,
9881 SelectionDAG &DAG) {
9882 switch (VT.SimpleTy) {
9883 case MVT::v2i64:
9884 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9885 case MVT::v2f64:
9886 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9887 case MVT::v4i32:
9888 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9889 case MVT::v4f32:
9890 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9891 case MVT::v8i16:
9892 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
9893 case MVT::v16i8:
9894 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
9896 default:
9897 llvm_unreachable("Unimplemented!");
9898 }
9899 }
9901 /// \brief Helper function to test whether a shuffle mask could be
9902 /// simplified by widening the elements being shuffled.
9904 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
9905 /// leaves it in an unspecified state.
9907 /// NOTE: This must handle normal vector shuffle masks and *target* vector
9908 /// shuffle masks. The latter have the special property of a '-2' representing
9909 /// a zero-ed lane of a vector.
9910 static bool canWidenShuffleElements(ArrayRef<int> Mask,
9911 SmallVectorImpl<int> &WidenedMask) {
9912 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
9913 // If both elements are undef, its trivial.
9914 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
9915 WidenedMask.push_back(SM_SentinelUndef);
9916 continue;
9917 }
9919 // Check for an undef mask and a mask value properly aligned to fit with
9920 // a pair of values. If we find such a case, use the non-undef mask's value.
9921 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
9922 WidenedMask.push_back(Mask[i + 1] / 2);
9923 continue;
9924 }
9925 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
9926 WidenedMask.push_back(Mask[i] / 2);
9927 continue;
9928 }
9930 // When zeroing, we need to spread the zeroing across both lanes to widen.
9931 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
9932 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
9933 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
9934 WidenedMask.push_back(SM_SentinelZero);
9935 continue;
9936 }
9937 return false;
9938 }
9940 // Finally check if the two mask values are adjacent and aligned with
9941 // their pair.
9942 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
9943 WidenedMask.push_back(Mask[i] / 2);
9944 continue;
9945 }
9947 // Otherwise we can't safely widen the elements used in this shuffle.
9948 return false;
9949 }
9950 assert(WidenedMask.size() == Mask.size() / 2 &&
9951 "Incorrect size of mask after widening the elements!");
9953 return true;
9954 }
9956 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
9958 /// This routine just extracts two subvectors, shuffles them independently, and
9959 /// then concatenates them back together. This should work effectively with all
9960 /// AVX vector shuffle types.
9961 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
9962 SDValue V2, ArrayRef<int> Mask,
9963 SelectionDAG &DAG) {
9964 assert(VT.getSizeInBits() >= 256 &&
9965 "Only for 256-bit or wider vector shuffles!");
9966 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
9967 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
9969 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
9970 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
9972 int NumElements = VT.getVectorNumElements();
9973 int SplitNumElements = NumElements / 2;
9974 MVT ScalarVT = VT.getVectorElementType();
9975 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
9977 // Rather than splitting build-vectors, just build two narrower build
9978 // vectors. This helps shuffling with splats and zeros.
9979 auto SplitVector = [&](SDValue V) {
9980 while (V.getOpcode() == ISD::BITCAST)
9981 V = V->getOperand(0);
9983 MVT OrigVT = V.getSimpleValueType();
9984 int OrigNumElements = OrigVT.getVectorNumElements();
9985 int OrigSplitNumElements = OrigNumElements / 2;
9986 MVT OrigScalarVT = OrigVT.getVectorElementType();
9987 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
9989 SDValue LoV, HiV;
9991 auto *BV = dyn_cast<BuildVectorSDNode>(V);
9992 if (!BV) {
9993 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
9994 DAG.getIntPtrConstant(0, DL));
9995 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
9996 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
9997 } else {
9999 SmallVector<SDValue, 16> LoOps, HiOps;
10000 for (int i = 0; i < OrigSplitNumElements; ++i) {
10001 LoOps.push_back(BV->getOperand(i));
10002 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10003 }
10004 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10005 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10006 }
10007 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
10008 DAG.getBitcast(SplitVT, HiV));
10009 };
10011 SDValue LoV1, HiV1, LoV2, HiV2;
10012 std::tie(LoV1, HiV1) = SplitVector(V1);
10013 std::tie(LoV2, HiV2) = SplitVector(V2);
10015 // Now create two 4-way blends of these half-width vectors.
10016 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10017 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10018 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10019 for (int i = 0; i < SplitNumElements; ++i) {
10020 int M = HalfMask[i];
10021 if (M >= NumElements) {
10022 if (M >= NumElements + SplitNumElements)
10023 UseHiV2 = true;
10024 else
10025 UseLoV2 = true;
10026 V2BlendMask.push_back(M - NumElements);
10027 V1BlendMask.push_back(-1);
10028 BlendMask.push_back(SplitNumElements + i);
10029 } else if (M >= 0) {
10030 if (M >= SplitNumElements)
10031 UseHiV1 = true;
10032 else
10033 UseLoV1 = true;
10034 V2BlendMask.push_back(-1);
10035 V1BlendMask.push_back(M);
10036 BlendMask.push_back(i);
10037 } else {
10038 V2BlendMask.push_back(-1);
10039 V1BlendMask.push_back(-1);
10040 BlendMask.push_back(-1);
10041 }
10042 }
10044 // Because the lowering happens after all combining takes place, we need to
10045 // manually combine these blend masks as much as possible so that we create
10046 // a minimal number of high-level vector shuffle nodes.
10048 // First try just blending the halves of V1 or V2.
10049 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10050 return DAG.getUNDEF(SplitVT);
10051 if (!UseLoV2 && !UseHiV2)
10052 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10053 if (!UseLoV1 && !UseHiV1)
10054 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10056 SDValue V1Blend, V2Blend;
10057 if (UseLoV1 && UseHiV1) {
10058 V1Blend =
10059 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10060 } else {
10061 // We only use half of V1 so map the usage down into the final blend mask.
10062 V1Blend = UseLoV1 ? LoV1 : HiV1;
10063 for (int i = 0; i < SplitNumElements; ++i)
10064 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10065 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10066 }
10067 if (UseLoV2 && UseHiV2) {
10068 V2Blend =
10069 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10070 } else {
10071 // We only use half of V2 so map the usage down into the final blend mask.
10072 V2Blend = UseLoV2 ? LoV2 : HiV2;
10073 for (int i = 0; i < SplitNumElements; ++i)
10074 if (BlendMask[i] >= SplitNumElements)
10075 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10076 }
10077 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10078 };
10079 SDValue Lo = HalfBlend(LoMask);
10080 SDValue Hi = HalfBlend(HiMask);
10081 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10082 }
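// For example: a v4f64 shuffle <0,4,1,5> splits into LoMask = <0,4> and
// HiMask = <1,5>; both used elements live in the low halves of V1 and V2, so
// each half becomes a single v2f64 shuffle of LoV1 and LoV2 and the two
// results are concatenated back together.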
10084 /// \brief Either split a vector in halves or decompose the shuffles and the
10085 /// blends.
10087 /// This is provided as a good fallback for many lowerings of non-single-input
10088 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10089 /// between splitting the shuffle into 128-bit components and stitching those
10090 /// back together vs. extracting the single-input shuffles and blending those
10091 /// results.
10092 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10093 SDValue V2, ArrayRef<int> Mask,
10094 SelectionDAG &DAG) {
10095 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10096 "lower single-input shuffles as it "
10097 "could then recurse on itself.");
10098 int Size = Mask.size();
10100 // If this can be modeled as a broadcast of two elements followed by a blend,
10101 // prefer that lowering. This is especially important because broadcasts can
10102 // often fold with memory operands.
10103 auto DoBothBroadcast = [&] {
10104 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10105 for (int M : Mask)
10106 if (M >= Size) {
10107 if (V2BroadcastIdx == -1)
10108 V2BroadcastIdx = M - Size;
10109 else if (M - Size != V2BroadcastIdx)
10110 return false;
10111 } else if (M >= 0) {
10112 if (V1BroadcastIdx == -1)
10113 V1BroadcastIdx = M;
10114 else if (M != V1BroadcastIdx)
10115 return false;
10116 }
10117 return true;
10118 };
10119 if (DoBothBroadcast())
10120 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10121 DAG);
10123 // If the inputs all stem from a single 128-bit lane of each input, then we
10124 // split them rather than blending because the split will decompose to
10125 // unusually few instructions.
10126 int LaneCount = VT.getSizeInBits() / 128;
10127 int LaneSize = Size / LaneCount;
10128 SmallBitVector LaneInputs[2];
10129 LaneInputs[0].resize(LaneCount, false);
10130 LaneInputs[1].resize(LaneCount, false);
10131 for (int i = 0; i < Size; ++i)
10132 if (Mask[i] >= 0)
10133 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10134 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10135 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10137 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10138 // that the decomposed single-input shuffles don't end up here.
10139 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10140 }
10142 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10143 /// a permutation and blend of those lanes.
10145 /// This essentially blends the out-of-lane inputs to each lane into the lane
10146 /// from a permuted copy of the vector. This lowering strategy results in four
10147 /// instructions in the worst case for a single-input cross lane shuffle which
10148 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10149 /// of. Special cases for each particular shuffle pattern should be handled
10150 /// prior to trying this lowering.
10151 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10152 SDValue V1, SDValue V2,
10153 ArrayRef<int> Mask,
10154 SelectionDAG &DAG) {
10155 // FIXME: This should probably be generalized for 512-bit vectors as well.
10156 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
10157 int LaneSize = Mask.size() / 2;
10159 // If there are only inputs from one 128-bit lane, splitting will in fact be
10160 // less expensive. The flags track whether the given lane contains an element
10161 // that crosses to another lane.
10162 bool LaneCrossing[2] = {false, false};
10163 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10164 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10165 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10166 if (!LaneCrossing[0] || !LaneCrossing[1])
10167 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10169 if (isSingleInputShuffleMask(Mask)) {
10170 SmallVector<int, 32> FlippedBlendMask;
10171 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10172 FlippedBlendMask.push_back(
10173 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10174 ? Mask[i]
10175 : Mask[i] % LaneSize +
10176 (i / LaneSize) * LaneSize + Size));
10178 // Flip the vector, and blend the results which should now be in-lane. The
10179 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10180 // 5 for the high source. The value 3 selects the high half of source 2 and
10181 // the value 2 selects the low half of source 2. We only use source 2 to
10182 // allow folding it into a memory operand.
10183 unsigned PERMMask = 3 | 2 << 4;
10184 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10185 V1, DAG.getConstant(PERMMask, DL, MVT::i8));
10186 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10187 }
10189 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10190 // will be handled by the above logic and a blend of the results, much like
10191 // other patterns in AVX.
10192 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10193 }
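// As an illustration: the single-input v4f64 mask <2,3,1,0> cannot be
// widened, so V1 is flipped with VPERM2X128 (immediate 0x23) into <2,3,0,1>
// and the now-in-lane blend mask becomes <4,5,7,6>, selecting entirely from
// the flipped vector.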
10195 /// \brief Handle lowering 2-lane 128-bit shuffles.
10196 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10197 SDValue V2, ArrayRef<int> Mask,
10198 const X86Subtarget *Subtarget,
10199 SelectionDAG &DAG) {
10200 // TODO: If minimizing size and one of the inputs is a zero vector and the
10201 // the zero vector has only one use, we could use a VPERM2X128 to save the
10202 // instruction bytes needed to explicitly generate the zero vector.
10204 // Blends are faster and handle all the non-lane-crossing cases.
10205 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10206 Subtarget, DAG))
10207 return Blend;
10210 bool IsV2Zero = ISD::isBuildVectorAllZeros(V2.getNode());
10212 // If either input operand is a zero vector, use VPERM2X128 because its mask
10213 // allows us to replace the zero input with an implicit zero.
10214 if (!IsV1Zero && !IsV2Zero) {
10215 // Check for patterns which can be matched with a single insert of a 128-bit
10216 // subvector.
10217 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
10218 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
10219 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10220 VT.getVectorNumElements() / 2);
10221 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10222 DAG.getIntPtrConstant(0, DL));
10223 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10224 OnlyUsesV1 ? V1 : V2,
10225 DAG.getIntPtrConstant(0, DL));
10226 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10227 }
10228 }
10230 // Otherwise form a 128-bit permutation. After accounting for undefs,
10231 // convert the 64-bit shuffle mask selection values into 128-bit
10232 // selection bits by dividing the indexes by 2 and shifting into positions
10233 // defined by a vperm2*128 instruction's immediate control byte.
10235 // The immediate permute control byte looks like this:
10236 // [1:0] - select 128 bits from sources for low half of destination
10237 // [2] - ignore
10238 // [3] - zero low half of destination
10239 // [5:4] - select 128 bits from sources for high half of destination
10240 // [6] - ignore
10241 // [7] - zero high half of destination
10243 int MaskLO = Mask[0];
10244 if (MaskLO == SM_SentinelUndef)
10245 MaskLO = Mask[1] == SM_SentinelUndef ? 0 : Mask[1];
10247 int MaskHI = Mask[2];
10248 if (MaskHI == SM_SentinelUndef)
10249 MaskHI = Mask[3] == SM_SentinelUndef ? 0 : Mask[3];
10251 unsigned PermMask = MaskLO / 2 | (MaskHI / 2) << 4;
10253 // If either input is a zero vector, replace it with an undef input.
10254 // Shuffle mask values < 4 are selecting elements of V1.
10255 // Shuffle mask values >= 4 are selecting elements of V2.
10256 // Adjust each half of the permute mask by clearing the half that was
10257 // selecting the zero vector and setting the zero mask bit.
10258 if (IsV1Zero) {
10259 V1 = DAG.getUNDEF(VT);
10260 if (MaskLO < 4)
10261 PermMask = (PermMask & 0xf0) | 0x08;
10262 if (MaskHI < 4)
10263 PermMask = (PermMask & 0x0f) | 0x80;
10264 }
10265 if (IsV2Zero) {
10266 V2 = DAG.getUNDEF(VT);
10267 if (MaskLO >= 4)
10268 PermMask = (PermMask & 0xf0) | 0x08;
10269 if (MaskHI >= 4)
10270 PermMask = (PermMask & 0x0f) | 0x80;
10271 }
10273 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10274 DAG.getConstant(PermMask, DL, MVT::i8));
10275 }
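// For example: a v4f64 mask <0,1,6,7> gives MaskLO = 0 and MaskHI = 6, so
// PermMask = 0/2 | (6/2) << 4 = 0x30: the low half comes from V1's low lane
// and the high half from V2's high lane.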
10277 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10278 /// shuffling each lane.
10280 /// This will only succeed when the result of fixing the 128-bit lanes results
10281 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10282 /// each 128-bit lane. This handles many cases where we can quickly blend away
10283 /// the lane crosses early and then use simpler shuffles within each lane.
10285 /// FIXME: It might be worthwhile at some point to support this without
10286 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10287 /// in x86 only floating point has interesting non-repeating shuffles, and even
10288 /// those are still *marginally* more expensive.
10289 static SDValue lowerVectorShuffleByMerging128BitLanes(
10290 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10291 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10292 assert(!isSingleInputShuffleMask(Mask) &&
10293 "This is only useful with multiple inputs.");
10295 int Size = Mask.size();
10296 int LaneSize = 128 / VT.getScalarSizeInBits();
10297 int NumLanes = Size / LaneSize;
10298 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10300 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10301 // check whether the in-128-bit lane shuffles share a repeating pattern.
10302 SmallVector<int, 4> Lanes;
10303 Lanes.resize(NumLanes, -1);
10304 SmallVector<int, 4> InLaneMask;
10305 InLaneMask.resize(LaneSize, -1);
10306 for (int i = 0; i < Size; ++i) {
10307 if (Mask[i] < 0)
10308 continue;
10310 int j = i / LaneSize;
10312 if (Lanes[j] < 0) {
10313 // First entry we've seen for this lane.
10314 Lanes[j] = Mask[i] / LaneSize;
10315 } else if (Lanes[j] != Mask[i] / LaneSize) {
10316 // This doesn't match the lane selected previously!
10317 return SDValue();
10318 }
10320 // Check that within each lane we have a consistent shuffle mask.
10321 int k = i % LaneSize;
10322 if (InLaneMask[k] < 0) {
10323 InLaneMask[k] = Mask[i] % LaneSize;
10324 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10325 // This doesn't fit a repeating in-lane mask.
10326 return SDValue();
10327 }
10328 }
10330 // First shuffle the lanes into place.
10331 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10332 VT.getSizeInBits() / 64);
10333 SmallVector<int, 8> LaneMask;
10334 LaneMask.resize(NumLanes * 2, -1);
10335 for (int i = 0; i < NumLanes; ++i)
10336 if (Lanes[i] >= 0) {
10337 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10338 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10339 }
10341 V1 = DAG.getBitcast(LaneVT, V1);
10342 V2 = DAG.getBitcast(LaneVT, V2);
10343 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10345 // Cast it back to the type we actually want.
10346 LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
10348 // Now do a simple shuffle that isn't lane crossing.
10349 SmallVector<int, 8> NewMask;
10350 NewMask.resize(Size, -1);
10351 for (int i = 0; i < Size; ++i)
10352 if (Mask[i] >= 0)
10353 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10354 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10355 "Must not introduce lane crosses at this point!");
10357 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10358 }
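// For example: the two-input v8i32 mask <4,5,6,7,8,9,10,11> selects lane 1
// of V1 and then lane 0 of V2 with a consistent in-lane pattern, so the
// v4i64 lane-fixing shuffle uses mask <2,3,4,5> and the remaining in-lane
// shuffle is a no-op.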
10360 /// Lower shuffles where an entire half of a 256-bit vector is UNDEF.
10361 /// This allows for fast cases such as subvector extraction/insertion
10362 /// or shuffling smaller vector types which can lower more efficiently.
10363 static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
10364 SDValue V2, ArrayRef<int> Mask,
10365 const X86Subtarget *Subtarget,
10366 SelectionDAG &DAG) {
10367 assert(VT.getSizeInBits() == 256 && "Expected 256-bit vector");
10369 unsigned NumElts = VT.getVectorNumElements();
10370 unsigned HalfNumElts = NumElts / 2;
10371 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
10373 bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts);
10374 bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts);
10375 if (!UndefLower && !UndefUpper)
10376 return SDValue();
10378 // Upper half is undef and lower half is whole upper subvector.
10379 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
10380 if (UndefUpper &&
10381 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
10382 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
10383 DAG.getIntPtrConstant(HalfNumElts, DL));
10384 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
10385 DAG.getIntPtrConstant(0, DL));
10386 }
10388 // Lower half is undef and upper half is whole lower subvector.
10389 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
10390 if (UndefLower &&
10391 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
10392 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
10393 DAG.getIntPtrConstant(0, DL));
10394 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
10395 DAG.getIntPtrConstant(HalfNumElts, DL));
10396 }
10398 // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
10399 if (UndefLower && Subtarget->hasAVX2() &&
10400 (VT == MVT::v4f64 || VT == MVT::v4i64))
10401 return SDValue();
10403 // If the shuffle only uses the lower halves of the input operands,
10404 // then extract them and perform the 'half' shuffle at half width.
10405 // e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
10406 int HalfIdx1 = -1, HalfIdx2 = -1;
10407 SmallVector<int, 8> HalfMask;
10408 unsigned Offset = UndefLower ? HalfNumElts : 0;
10409 for (unsigned i = 0; i != HalfNumElts; ++i) {
10410 int M = Mask[i + Offset];
10411 if (M < 0) {
10412 HalfMask.push_back(M);
10413 continue;
10414 }
10416 // Determine which of the 4 half vectors this element is from.
10417 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
10418 int HalfIdx = M / HalfNumElts;
10420 // Only shuffle using the lower halves of the inputs.
10421 // TODO: Investigate usefulness of shuffling with upper halves.
10422 if (HalfIdx != 0 && HalfIdx != 2)
10423 return SDValue();
10425 // Determine the element index into its half vector source.
10426 int HalfElt = M % HalfNumElts;
10428 // We can shuffle with up to 2 half vectors, set the new 'half'
10429 // shuffle mask accordingly.
10430 if (-1 == HalfIdx1 || HalfIdx1 == HalfIdx) {
10431 HalfMask.push_back(HalfElt);
10432 HalfIdx1 = HalfIdx;
10433 continue;
10434 }
10435 if (-1 == HalfIdx2 || HalfIdx2 == HalfIdx) {
10436 HalfMask.push_back(HalfElt + HalfNumElts);
10437 HalfIdx2 = HalfIdx;
10438 continue;
10439 }
10441 // Too many half vectors referenced.
10442 return SDValue();
10443 }
10444 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
10446 auto GetHalfVector = [&](int HalfIdx) {
10447 if (HalfIdx < 0)
10448 return DAG.getUNDEF(HalfVT);
10449 SDValue V = (HalfIdx < 2 ? V1 : V2);
10450 HalfIdx = (HalfIdx % 2) * HalfNumElts;
10451 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
10452 DAG.getIntPtrConstant(HalfIdx, DL));
10455 SDValue Half1 = GetHalfVector(HalfIdx1);
10456 SDValue Half2 = GetHalfVector(HalfIdx2);
10457 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
10458 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
10459 DAG.getIntPtrConstant(Offset, DL));
10460 }
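// For example: the v8f32 mask <u,u,u,u,0,8,1,9> has an undef lower half; its
// used elements come from the low halves of V1 and V2 (HalfIdx1 = 0,
// HalfIdx2 = 2), HalfMask becomes <0,4,1,5>, and the v4f32 result is
// inserted at element 4 of an undef vector.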
10462 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10463 /// given mask.
10465 /// This returns true if the elements from a particular input are already in the
10466 /// slot required by the given mask and require no permutation.
10467 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10468 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10469 int Size = Mask.size();
10470 for (int i = 0; i < Size; ++i)
10471 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10472 return false;
10474 return true;
10475 }
10477 static SDValue lowerVectorShuffleWithSHUFPD(SDLoc DL, MVT VT,
10478 ArrayRef<int> Mask, SDValue V1,
10479 SDValue V2, SelectionDAG &DAG) {
10481 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
10482 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ..
10483 assert(VT.getScalarSizeInBits() == 64 && "Unexpected data type for VSHUFPD");
10484 int NumElts = VT.getVectorNumElements();
10485 bool ShufpdMask = true;
10486 bool CommutableMask = true;
10487 unsigned Immediate = 0;
10488 for (int i = 0; i < NumElts; ++i) {
10489 if (Mask[i] < 0)
10490 continue;
10491 int Val = (i & 6) + NumElts * (i & 1);
10492 int CommutVal = (i & 0xe) + NumElts * ((i & 1)^1);
10492 int CommutVal = (i & 0xe) + NumElts * ((i & 1)^1);
10493 if (Mask[i] < Val || Mask[i] > Val + 1)
10494 ShufpdMask = false;
10495 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
10496 CommutableMask = false;
10497 Immediate |= (Mask[i] % 2) << i;
10498 }
10499 if (ShufpdMask)
10500 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
10501 DAG.getConstant(Immediate, DL, MVT::i8));
10502 if (CommutableMask)
10503 return DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
10504 DAG.getConstant(Immediate, DL, MVT::i8));
10506 return SDValue();
10507 }
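// For example: the v4f64 mask <1,5,2,7> matches the SHUFPD pattern directly
// (Val = 0, 4, 2, 6 for i = 0..3) and yields Immediate = 0b1011 = 0xB.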
10508 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10510 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10511 /// isn't available.
10512 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10513 const X86Subtarget *Subtarget,
10514 SelectionDAG &DAG) {
10515 SDLoc DL(Op);
10516 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10517 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10518 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10519 ArrayRef<int> Mask = SVOp->getMask();
10520 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10522 SmallVector<int, 4> WidenedMask;
10523 if (canWidenShuffleElements(Mask, WidenedMask))
10524 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10525 DAG);
10527 if (isSingleInputShuffleMask(Mask)) {
10528 // Check for being able to broadcast a single element.
10529 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f64, V1,
10530 Mask, Subtarget, DAG))
10531 return Broadcast;
10533 // Use low duplicate instructions for masks that match their pattern.
10534 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
10535 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10537 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10538 // Non-half-crossing single input shuffles can be lowerid with an
10539 // interleaved permutation.
10540 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10541 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10542 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10543 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
10544 }
10546 // With AVX2 we have direct support for this permutation.
10547 if (Subtarget->hasAVX2())
10548 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10549 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
10551 // Otherwise, fall back.
10552 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10553 DAG);
10554 }
10556 // Use dedicated unpack instructions for masks that match their pattern.
10557 if (SDValue V =
10558 lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
10559 return V;
10561 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10562 Subtarget, DAG))
10563 return Blend;
10565 // Check if the blend happens to exactly fit that of SHUFPD.
10566 if (SDValue Op =
10567 lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
10568 return Op;
10570 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10571 // shuffle. However, if we have AVX2 and either inputs are already in place,
10572 // we will be able to shuffle even across lanes the other input in a single
10573 // instruction so skip this pattern.
10574 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10575 isShuffleMaskInputInPlace(1, Mask))))
10576 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10577 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10578 return Result;
10580 // If we have AVX2 then we always want to lower with a blend because for a v4
10581 // we can fully permute the elements.
10582 if (Subtarget->hasAVX2())
10583 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10584 Mask, DAG);
10586 // Otherwise fall back on generic lowering.
10587 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10588 }
10590 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10592 /// This routine is only called when we have AVX2 and thus a reasonable
10593 /// instruction set for v4i64 shuffling.
10594 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10595 const X86Subtarget *Subtarget,
10596 SelectionDAG &DAG) {
10597 SDLoc DL(Op);
10598 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10599 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10600 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10601 ArrayRef<int> Mask = SVOp->getMask();
10602 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10603 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10605 SmallVector<int, 4> WidenedMask;
10606 if (canWidenShuffleElements(Mask, WidenedMask))
10607 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10608 DAG);
10610 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10611 Subtarget, DAG))
10612 return Blend;
10614 // Check for being able to broadcast a single element.
10615 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1,
10616 Mask, Subtarget, DAG))
10617 return Broadcast;
10619 // When the shuffle is mirrored between the 128-bit lanes of the vector, we can
10620 // use lower latency instructions that will operate on both 128-bit lanes.
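// For example (a sketch): the v4i64 mask <1, 1, 3, 3> repeats per lane as
// <1, 1>, so it can be expressed as the v8i32 PSHUFD mask <2, 3, 2, 3>
// (imm8 0xEE) applied to a bitcast of V1, avoiding a cross-lane permute.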
10621 SmallVector<int, 2> RepeatedMask;
10622 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10623 if (isSingleInputShuffleMask(Mask)) {
10624 int PSHUFDMask[] = {-1, -1, -1, -1};
10625 for (int i = 0; i < 2; ++i)
10626 if (RepeatedMask[i] >= 0) {
10627 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10628 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10629 }
10630 return DAG.getBitcast(
10631 MVT::v4i64,
10632 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10633 DAG.getBitcast(MVT::v8i32, V1),
10634 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
10635 }
10636 }
10638 // AVX2 provides a direct instruction for permuting a single input across
10639 // lanes.
10640 if (isSingleInputShuffleMask(Mask))
10641 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10642 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
10644 // Try to use shift instructions.
10645 if (SDValue Shift =
10646 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
10647 return Shift;
10649 // Use dedicated unpack instructions for masks that match their pattern.
10650 if (SDValue V =
10651 lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
10652 return V;
10654 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10655 // shuffle. However, if we have AVX2 and either input is already in place,
10656 // we will be able to shuffle the other input across lanes in a single
10657 // instruction, so skip this pattern.
10658 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10659 isShuffleMaskInputInPlace(1, Mask))))
10660 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10661 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10662 return Result;
10664 // Otherwise fall back on generic blend lowering.
10665 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10666 Mask, DAG);
10667 }
10669 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10671 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10672 /// isn't available.
10673 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10674 const X86Subtarget *Subtarget,
10675 SelectionDAG &DAG) {
10676 SDLoc DL(Op);
10677 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10678 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10679 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10680 ArrayRef<int> Mask = SVOp->getMask();
10681 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10683 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10684 Subtarget, DAG))
10685 return Blend;
10687 // Check for being able to broadcast a single element.
10688 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1,
10689 Mask, Subtarget, DAG))
10690 return Broadcast;
10692 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10693 // options to efficiently lower the shuffle.
10694 SmallVector<int, 4> RepeatedMask;
10695 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10696 assert(RepeatedMask.size() == 4 &&
10697 "Repeated masks must be half the mask width!");
10699 // Use even/odd duplicate instructions for masks that match their pattern.
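// For example (a sketch): with V1 = <a, b, c, d, e, f, g, h>, the mask
// <0, 0, 2, 2, 4, 4, 6, 6> produces <a, a, c, c, e, e, g, g> via vmovsldup,
// and <1, 1, 3, 3, 5, 5, 7, 7> produces the odd-element copy via vmovshdup.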
10700 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
10701 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10702 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3, 5, 5, 7, 7}))
10703 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10705 if (isSingleInputShuffleMask(Mask))
10706 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10707 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
10709 // Use dedicated unpack instructions for masks that match their pattern.
10710 if (SDValue V =
10711 lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
10712 return V;
10714 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10715 // have already handled any direct blends. We also need to squash the
10716 // repeated mask into a simulated v4f32 mask.
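// For example (a sketch): a repeated mask <0, 1, 8, 9> indexes V2 with
// v8f32-relative values; after subtracting 4 it becomes <0, 1, 4, 5>, the
// v4f32-relative form that lowerVectorShuffleWithSHUFPS expects.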
10717 for (int i = 0; i < 4; ++i)
10718 if (RepeatedMask[i] >= 8)
10719 RepeatedMask[i] -= 4;
10720 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10723 // If we have a single input shuffle with different shuffle patterns in the
10724 // two 128-bit lanes, use a variable-mask VPERMILPS.
10725 if (isSingleInputShuffleMask(Mask)) {
10726 SDValue VPermMask[8];
10727 for (int i = 0; i < 8; ++i)
10728 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10729 : DAG.getConstant(Mask[i], DL, MVT::i32);
10730 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10731 return DAG.getNode(
10732 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10733 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10735 if (Subtarget->hasAVX2())
10736 return DAG.getNode(
10737 X86ISD::VPERMV, DL, MVT::v8f32,
10738 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10740 // Otherwise, fall back.
10741 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10742 DAG);
10743 }
10745 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10746 // shuffle.
10747 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10748 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10749 return Result;
10751 // If we have AVX2 then we always want to lower with a blend because at v8 we
10752 // can fully permute the elements.
10753 if (Subtarget->hasAVX2())
10754 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10755 Mask, DAG);
10757 // Otherwise fall back on generic lowering.
10758 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10759 }
10761 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10763 /// This routine is only called when we have AVX2 and thus a reasonable
10764 /// instruction set for v8i32 shuffling.
10765 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10766 const X86Subtarget *Subtarget,
10767 SelectionDAG &DAG) {
10768 SDLoc DL(Op);
10769 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10770 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10771 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10772 ArrayRef<int> Mask = SVOp->getMask();
10773 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10774 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10776 // Whenever we can lower this as a zext, that instruction is strictly faster
10777 // than any alternative. It also allows us to fold memory operands into the
10778 // shuffle in many cases.
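// For example (a sketch): with V2 known zero, the v8i32 mask
// <0, 8, 1, 8, 2, 8, 3, 8> interleaves the low four elements of V1 with
// zeros, which is exactly a vpmovzxdq of V1's low half.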
10779 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10780 Mask, Subtarget, DAG))
10781 return ZExt;
10783 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10784 Subtarget, DAG))
10785 return Blend;
10787 // Check for being able to broadcast a single element.
10788 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1,
10789 Mask, Subtarget, DAG))
10790 return Broadcast;
10792 // If the shuffle mask is repeated in each 128-bit lane we can use more
10793 // efficient instructions that mirror the shuffles across the two 128-bit
10794 // lanes.
10795 SmallVector<int, 4> RepeatedMask;
10796 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10797 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10798 if (isSingleInputShuffleMask(Mask))
10799 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10800 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
10802 // Use dedicated unpack instructions for masks that match their pattern.
10803 if (SDValue V =
10804 lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
10805 return V;
10806 }
10808 // Try to use shift instructions.
10809 if (SDValue Shift =
10810 lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
10811 return Shift;
10812 // Try to use byte rotation instructions.
10813 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10814 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10815 return Rotate;
10817 // If the shuffle patterns aren't repeated but it is a single input, directly
10818 // generate a cross-lane VPERMD instruction.
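// For example (a sketch): the full reversal <7, 6, 5, 4, 3, 2, 1, 0> crosses
// both 128-bit lanes, but a single vpermd with that mask held in a register
// handles it directly instead of a two-shuffle-plus-blend sequence.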
10819 if (isSingleInputShuffleMask(Mask)) {
10820 SDValue VPermMask[8];
10821 for (int i = 0; i < 8; ++i)
10822 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10823 : DAG.getConstant(Mask[i], DL, MVT::i32);
10824 return DAG.getNode(
10825 X86ISD::VPERMV, DL, MVT::v8i32,
10826 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10829 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10830 // shuffle.
10831 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10832 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10833 return Result;
10835 // Otherwise fall back on generic blend lowering.
10836 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10837 Mask, DAG);
10838 }
10840 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10842 /// This routine is only called when we have AVX2 and thus a reasonable
10843 /// instruction set for v16i16 shuffling.
10844 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10845 const X86Subtarget *Subtarget,
10846 SelectionDAG &DAG) {
10847 SDLoc DL(Op);
10848 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10849 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10850 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10851 ArrayRef<int> Mask = SVOp->getMask();
10852 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10853 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10855 // Whenever we can lower this as a zext, that instruction is strictly faster
10856 // than any alternative. It also allows us to fold memory operands into the
10857 // shuffle in many cases.
10858 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10859 Mask, Subtarget, DAG))
10860 return ZExt;
10862 // Check for being able to broadcast a single element.
10863 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1,
10864 Mask, Subtarget, DAG))
10865 return Broadcast;
10867 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10868 Subtarget, DAG))
10869 return Blend;
10871 // Use dedicated unpack instructions for masks that match their pattern.
10872 if (SDValue V =
10873 lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
10874 return V;
10876 // Try to use shift instructions.
10877 if (SDValue Shift =
10878 lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
10879 return Shift;
10881 // Try to use byte rotation instructions.
10882 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10883 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10884 return Rotate;
10886 if (isSingleInputShuffleMask(Mask)) {
10887 // There are no generalized cross-lane shuffle operations available on i16
10888 // element types.
10889 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10890 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10891 Mask, DAG);
10893 SmallVector<int, 8> RepeatedMask;
10894 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10895 // As this is a single-input shuffle, the repeated mask should be
10896 // a strictly valid v8i16 mask that we can pass through to the v8i16
10897 // lowering to handle even the v16 case.
10898 return lowerV8I16GeneralSingleInputVectorShuffle(
10899 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
10900 }
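// Otherwise synthesize the shuffle with a vpshufb byte mask: each 16-bit
// element Mask[i] expands to the byte selectors 2*M and 2*M+1 within its own
// 128-bit lane. For example (a sketch), Mask[i] == 3 in the low lane becomes
// the byte pair <6, 7>.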
10902 SDValue PSHUFBMask[32];
10903 for (int i = 0; i < 16; ++i) {
10904 if (Mask[i] == -1) {
10905 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10906 continue;
10907 }
10909 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10910 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10911 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8);
10912 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8);
10913 }
10914 return DAG.getBitcast(MVT::v16i16,
10915 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8,
10916 DAG.getBitcast(MVT::v32i8, V1),
10917 DAG.getNode(ISD::BUILD_VECTOR, DL,
10918 MVT::v32i8, PSHUFBMask)));
10919 }
10921 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10922 // shuffle.
10923 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10924 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10925 return Result;
10927 // Otherwise fall back on generic lowering.
10928 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10931 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10933 /// This routine is only called when we have AVX2 and thus a reasonable
10934 /// instruction set for v32i8 shuffling.
10935 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10936 const X86Subtarget *Subtarget,
10937 SelectionDAG &DAG) {
10938 SDLoc DL(Op);
10939 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10940 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10941 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10942 ArrayRef<int> Mask = SVOp->getMask();
10943 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10944 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10946 // Whenever we can lower this as a zext, that instruction is strictly faster
10947 // than any alternative. It also allows us to fold memory operands into the
10948 // shuffle in many cases.
10949 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10950 Mask, Subtarget, DAG))
10951 return ZExt;
10953 // Check for being able to broadcast a single element.
10954 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1,
10955 Mask, Subtarget, DAG))
10956 return Broadcast;
10958 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10959 Subtarget, DAG))
10960 return Blend;
10962 // Use dedicated unpack instructions for masks that match their pattern.
10963 if (SDValue V =
10964 lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
10965 return V;
10967 // Try to use shift instructions.
10968 if (SDValue Shift =
10969 lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
10970 return Shift;
10972 // Try to use byte rotation instructions.
10973 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10974 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
10975 return Rotate;
10977 if (isSingleInputShuffleMask(Mask)) {
10978 // There are no generalized cross-lane shuffle operations available on i8
10979 // element types.
10980 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10981 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10982 Mask, DAG);
10984 SDValue PSHUFBMask[32];
10985 for (int i = 0; i < 32; ++i)
10986 PSHUFBMask[i] =
10987 Mask[i] < 0
10988 ? DAG.getUNDEF(MVT::i8)
10989 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL,
10990 MVT::i8);
10992 return DAG.getNode(
10993 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
10994 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
10995 }
10997 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10998 // shuffle.
10999 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11000 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11001 return Result;
11003 // Otherwise fall back on generic lowering.
11004 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11007 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11009 /// This routine either breaks down the specific type of a 256-bit x86 vector
11010 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11011 /// together based on the available instructions.
11012 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11013 MVT VT, const X86Subtarget *Subtarget,
11014 SelectionDAG &DAG) {
11015 SDLoc DL(Op);
11016 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11017 ArrayRef<int> Mask = SVOp->getMask();
11019 // If we have a single input to the zero element, insert that into V1 if we
11020 // can do so cheaply.
11021 int NumElts = VT.getVectorNumElements();
11022 int NumV2Elements = std::count_if(Mask.begin(), Mask.end(), [NumElts](int M) {
11023 return M >= NumElts;
11024 });
11026 if (NumV2Elements == 1 && Mask[0] >= NumElts)
11027 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
11028 DL, VT, V1, V2, Mask, Subtarget, DAG))
11029 return Insertion;
11031 // Handle special cases where the lower or upper half is UNDEF.
11032 if (SDValue V =
11033 lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
11034 return V;
11036 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
11037 // can check for those subtargets here and avoid much of the subtarget
11038 // querying in the per-vector-type lowering routines. With AVX1 we have
11039 // essentially *zero* ability to manipulate a 256-bit vector with integer
11040 // types. Since we'll use floating point types there eventually, just
11041 // immediately cast everything to a float and operate entirely in that domain.
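// For example (a sketch): on AVX1 a v4i64 shuffle is bitcast to v4f64,
// lowered with the floating-point shuffle logic, and the result bitcast back;
// the element bits are unchanged, only the execution domain differs.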
11042 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11043 int ElementBits = VT.getScalarSizeInBits();
11044 if (ElementBits < 32)
11045 // No floating point type available, decompose into 128-bit vectors.
11046 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11048 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11049 VT.getVectorNumElements());
11050 V1 = DAG.getBitcast(FpVT, V1);
11051 V2 = DAG.getBitcast(FpVT, V2);
11052 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11055 switch (VT.SimpleTy) {
11056 case MVT::v4f64:
11057 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11058 case MVT::v4i64:
11059 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11060 case MVT::v8f32:
11061 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11062 case MVT::v8i32:
11063 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11064 case MVT::v16i16:
11065 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11066 case MVT::v32i8:
11067 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11069 default:
11070 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11071 }
11072 }
11074 /// \brief Try to lower a vector shuffle as a 128-bit shuffles.
11075 static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT,
11076 ArrayRef<int> Mask,
11077 SDValue V1, SDValue V2,
11078 SelectionDAG &DAG) {
11079 assert(VT.getScalarSizeInBits() == 64 &&
11080 "Unexpected element type size for 128bit shuffle.");
11082 // Handling a 256-bit vector here requires VLX, and the function
11083 // lowerV2X128VectorShuffle() is most probably the better solution there.
11084 assert(VT.is512BitVector() && "Unexpected vector size for 128bit shuffle.");
11086 SmallVector<int, 4> WidenedMask;
11087 if (!canWidenShuffleElements(Mask, WidenedMask))
11088 return SDValue();
11090 // Form a 128-bit permutation.
11091 // Convert the 64-bit shuffle mask selection values into 128-bit selection
11092 // bits defined by a vshuf64x2 instruction's immediate control byte.
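// For example (a sketch): the widened mask <1, 0, 6, 7> packs the 2-bit
// fields 1, 0, 2, 3 into PermMask 0xE1, swapping the low two 128-bit
// quarters taken from V1 and keeping V2's high two quarters in place.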
11093 unsigned PermMask = 0, Imm = 0;
11094 unsigned ControlBitsNum = WidenedMask.size() / 2;
11096 for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
11097 if (WidenedMask[i] == SM_SentinelZero)
11098 return SDValue();
11100 // Use first element in place of undef mask.
11101 Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
11102 PermMask |= (Imm % WidenedMask.size()) << (i * ControlBitsNum);
11103 }
11105 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
11106 DAG.getConstant(PermMask, DL, MVT::i8));
11107 }
11109 static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
11110 ArrayRef<int> Mask, SDValue V1,
11111 SDValue V2, SelectionDAG &DAG) {
11113 assert(VT.getScalarSizeInBits() >= 16 && "Unexpected data type for PERMV");
11115 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
11116 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
11118 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
11119 if (isSingleInputShuffleMask(Mask))
11120 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
11122 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
11125 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11126 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11127 const X86Subtarget *Subtarget,
11128 SelectionDAG &DAG) {
11129 SDLoc DL(Op);
11130 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11131 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11132 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11133 ArrayRef<int> Mask = SVOp->getMask();
11134 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11136 if (SDValue Shuf128 =
11137 lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, V1, V2, DAG))
11138 return Shuf128;
11140 if (SDValue Unpck =
11141 lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
11142 return Unpck;
11144 return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
11145 }
11147 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11148 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11149 const X86Subtarget *Subtarget,
11150 SelectionDAG &DAG) {
11151 SDLoc DL(Op);
11152 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11153 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11154 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11155 ArrayRef<int> Mask = SVOp->getMask();
11156 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11158 if (SDValue Unpck =
11159 lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
11160 return Unpck;
11162 return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
11163 }
11165 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11166 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11167 const X86Subtarget *Subtarget,
11168 SelectionDAG &DAG) {
11169 SDLoc DL(Op);
11170 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11171 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11172 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11173 ArrayRef<int> Mask = SVOp->getMask();
11174 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11176 if (SDValue Shuf128 =
11177 lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, V1, V2, DAG))
11178 return Shuf128;
11180 if (SDValue Unpck =
11181 lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
11182 return Unpck;
11184 return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
11185 }
11187 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11188 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11189 const X86Subtarget *Subtarget,
11190 SelectionDAG &DAG) {
11191 SDLoc DL(Op);
11192 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11193 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11194 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11195 ArrayRef<int> Mask = SVOp->getMask();
11196 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11198 if (SDValue Unpck =
11199 lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
11200 return Unpck;
11202 return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
11203 }
11205 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11206 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11207 const X86Subtarget *Subtarget,
11208 SelectionDAG &DAG) {
11209 SDLoc DL(Op);
11210 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11211 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11212 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11213 ArrayRef<int> Mask = SVOp->getMask();
11214 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11215 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11217 return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
11220 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11221 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11222 const X86Subtarget *Subtarget,
11223 SelectionDAG &DAG) {
11224 SDLoc DL(Op);
11225 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11226 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11227 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11228 ArrayRef<int> Mask = SVOp->getMask();
11229 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11230 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11232 // FIXME: Implement direct support for this type!
11233 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11236 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11238 /// This routine either breaks down the specific type of a 512-bit x86 vector
11239 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11240 /// together based on the available instructions.
11241 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11242 MVT VT, const X86Subtarget *Subtarget,
11243 SelectionDAG &DAG) {
11244 SDLoc DL(Op);
11245 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11246 ArrayRef<int> Mask = SVOp->getMask();
11247 assert(Subtarget->hasAVX512() &&
11248 "Cannot lower 512-bit vectors w/ basic ISA!");
11250 // Check for being able to broadcast a single element.
11251 if (SDValue Broadcast =
11252 lowerVectorShuffleAsBroadcast(DL, VT, V1, Mask, Subtarget, DAG))
11253 return Broadcast;
11255 // Dispatch to each element type for lowering. If we don't have support for
11256 // specific element type shuffles at 512 bits, immediately split them and
11257 // lower them. Each lowering routine of a given type is allowed to assume that
11258 // the requisite ISA extensions for that element type are available.
11259 switch (VT.SimpleTy) {
11260 case MVT::v8f64:
11261 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11262 case MVT::v16f32:
11263 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11264 case MVT::v8i64:
11265 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11266 case MVT::v16i32:
11267 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11268 case MVT::v32i16:
11269 if (Subtarget->hasBWI())
11270 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11271 break;
11272 case MVT::v64i8:
11273 if (Subtarget->hasBWI())
11274 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11275 break;
11277 default:
11278 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11279 }
11281 // Otherwise fall back on splitting.
11282 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11285 // Lower vXi1 vector shuffles.
11286 // There is no dedicated instruction on AVX-512 that shuffles the masks.
11287 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
11288 // vector, shuffle, and then truncate it back.
11289 static SDValue lower1BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11290 MVT VT, const X86Subtarget *Subtarget,
11291 SelectionDAG &DAG) {
11292 SDLoc DL(Op);
11293 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11294 ArrayRef<int> Mask = SVOp->getMask();
11295 assert(Subtarget->hasAVX512() &&
11296 "Cannot lower 512-bit vectors w/o basic ISA!");
11297 MVT ExtVT;
11298 switch (VT.SimpleTy) {
11299 default:
11300 llvm_unreachable("Expected a vector of i1 elements");
11301 case MVT::v2i1:
11302 ExtVT = MVT::v2i64;
11303 break;
11304 case MVT::v4i1:
11305 ExtVT = MVT::v4i32;
11306 break;
11307 case MVT::v8i1:
11308 ExtVT = MVT::v8i64; // Take 512-bit type, more shuffles on KNL
11309 break;
11310 case MVT::v16i1:
11311 ExtVT = MVT::v16i32;
11312 break;
11313 case MVT::v32i1:
11314 ExtVT = MVT::v32i16;
11315 break;
11316 case MVT::v64i1:
11317 ExtVT = MVT::v64i8;
11318 break;
11319 }
11321 if (ISD::isBuildVectorAllZeros(V1.getNode()))
11322 V1 = getZeroVector(ExtVT, Subtarget, DAG, DL);
11323 else if (ISD::isBuildVectorAllOnes(V1.getNode()))
11324 V1 = getOnesVector(ExtVT, Subtarget, DAG, DL);
11325 else
11326 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
11328 if (V2.getOpcode() == ISD::UNDEF)
11329 V2 = DAG.getUNDEF(ExtVT);
11330 else if (ISD::isBuildVectorAllZeros(V2.getNode()))
11331 V2 = getZeroVector(ExtVT, Subtarget, DAG, DL);
11332 else if (ISD::isBuildVectorAllOnes(V2.getNode()))
11333 V2 = getOnesVector(ExtVT, Subtarget, DAG, DL);
11334 else
11335 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
11336 return DAG.getNode(ISD::TRUNCATE, DL, VT,
11337 DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask));
11339 /// \brief Top-level lowering for x86 vector shuffles.
11341 /// This handles decomposition, canonicalization, and lowering of all x86
11342 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11343 /// above in helper routines. The canonicalization attempts to widen shuffles
11344 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11345 /// s.t. only one of the two inputs needs to be tested, etc.
11346 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11347 SelectionDAG &DAG) {
11348 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11349 ArrayRef<int> Mask = SVOp->getMask();
11350 SDValue V1 = Op.getOperand(0);
11351 SDValue V2 = Op.getOperand(1);
11352 MVT VT = Op.getSimpleValueType();
11353 int NumElements = VT.getVectorNumElements();
11354 SDLoc dl(Op);
11355 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
11357 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
11358 "Can't lower MMX shuffles");
11360 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11361 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11362 if (V1IsUndef && V2IsUndef)
11363 return DAG.getUNDEF(VT);
11365 // When we create a shuffle node we put the UNDEF node as the second operand,
11366 // but in some cases the first operand may be transformed to UNDEF.
11367 // In this case we should just commute the node.
11368 if (V1IsUndef)
11369 return DAG.getCommutedVectorShuffle(*SVOp);
11371 // Check for non-undef masks pointing at an undef vector and make the masks
11372 // undef as well. This makes it easier to match the shuffle based solely on
11373 // the mask.
11374 if (V2IsUndef)
11375 for (int M : Mask)
11376 if (M >= NumElements) {
11377 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11378 for (int &M : NewMask)
11379 if (M >= NumElements)
11380 M = -1;
11381 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11382 }
11384 // We actually see shuffles that are entirely re-arrangements of a set of
11385 // zero inputs. This mostly happens while decomposing complex shuffles into
11386 // simple ones. Directly lower these as a buildvector of zeros.
11387 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11388 if (Zeroable.all())
11389 return getZeroVector(VT, Subtarget, DAG, dl);
11391 // Try to collapse shuffles into using a vector type with fewer elements but
11392 // wider element types. We cap this to not form integers or floating point
11393 // elements wider than 64 bits, but it might be interesting to form i128
11394 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
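// For example (a sketch): the v4i32 mask <0, 1, 4, 5> pairs cleanly into the
// v2i64 mask <0, 2>, so the shuffle can be retried with 64-bit elements.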
11395 SmallVector<int, 16> WidenedMask;
11396 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
11397 canWidenShuffleElements(Mask, WidenedMask)) {
11398 MVT NewEltVT = VT.isFloatingPoint()
11399 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11400 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11401 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11402 // Make sure that the new vector type is legal. For example, v2f64 isn't
11403 // legal on SSE1.
11404 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11405 V1 = DAG.getBitcast(NewVT, V1);
11406 V2 = DAG.getBitcast(NewVT, V2);
11407 return DAG.getBitcast(
11408 VT, DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11412 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11413 for (int M : SVOp->getMask())
11414 if (M < 0)
11415 ++NumUndefElements;
11416 else if (M < NumElements)
11417 ++NumV1Elements;
11418 else
11419 ++NumV2Elements;
11421 // Commute the shuffle as needed such that more elements come from V1 than
11422 // V2. This allows us to match the shuffle pattern strictly on how many
11423 // elements come from V1 without handling the symmetric cases.
11424 if (NumV2Elements > NumV1Elements)
11425 return DAG.getCommutedVectorShuffle(*SVOp);
11427 // When the number of V1 and V2 elements are the same, try to minimize the
11428 // number of uses of V2 in the low half of the vector. When that is tied,
11429 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11430 // indices for V2. When those are equal, try to ensure that the number of odd
11431 // indices for V1 is lower than the number of odd indices for V2.
11432 if (NumV1Elements == NumV2Elements) {
11433 int LowV1Elements = 0, LowV2Elements = 0;
11434 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11435 if (M >= NumElements)
11436 ++LowV2Elements;
11437 else if (M >= 0)
11438 ++LowV1Elements;
11439 if (LowV2Elements > LowV1Elements) {
11440 return DAG.getCommutedVectorShuffle(*SVOp);
11441 } else if (LowV2Elements == LowV1Elements) {
11442 int SumV1Indices = 0, SumV2Indices = 0;
11443 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11444 if (SVOp->getMask()[i] >= NumElements)
11445 SumV2Indices += i;
11446 else if (SVOp->getMask()[i] >= 0)
11447 SumV1Indices += i;
11448 if (SumV2Indices < SumV1Indices) {
11449 return DAG.getCommutedVectorShuffle(*SVOp);
11450 } else if (SumV2Indices == SumV1Indices) {
11451 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11452 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11453 if (SVOp->getMask()[i] >= NumElements)
11454 NumV2OddIndices += i % 2;
11455 else if (SVOp->getMask()[i] >= 0)
11456 NumV1OddIndices += i % 2;
11457 if (NumV2OddIndices < NumV1OddIndices)
11458 return DAG.getCommutedVectorShuffle(*SVOp);
11463 // For each vector width, delegate to a specialized lowering routine.
11464 if (VT.is128BitVector())
11465 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11467 if (VT.is256BitVector())
11468 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11470 if (VT.is512BitVector())
11471 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11473 if (Is1BitVector)
11474 return lower1BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11475 llvm_unreachable("Unimplemented!");
11478 // This function assumes its argument is a BUILD_VECTOR of constants or
11479 // undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
11480 // true.
11481 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
11482 unsigned &MaskValue) {
11483 MaskValue = 0;
11484 unsigned NumElems = BuildVector->getNumOperands();
11486 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11487 // We don't handle the >2 lanes case right now.
11488 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11489 if (NumLanes > 2)
11490 return false;
11492 unsigned NumElemsInLane = NumElems / NumLanes;
11494 // Blend for v16i16 should be symmetric for both lanes.
11495 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11496 SDValue EltCond = BuildVector->getOperand(i);
11497 SDValue SndLaneEltCond =
11498 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
11500 int Lane1Cond = -1, Lane2Cond = -1;
11501 if (isa<ConstantSDNode>(EltCond))
11502 Lane1Cond = !isNullConstant(EltCond);
11503 if (isa<ConstantSDNode>(SndLaneEltCond))
11504 Lane2Cond = !isNullConstant(SndLaneEltCond);
11506 unsigned LaneMask = 0;
11507 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
11508 // Lane1Cond != 0, means we want the first argument.
11509 // Lane1Cond == 0, means we want the second argument.
11510 // The encoding of this argument is 0 for the first argument, 1
11511 // for the second. Therefore, invert the condition.
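// For example (a sketch): a <4 x i1> condition of <1, 0, 1, 1> inverts to
// the blend immediate 0b0010, selecting the second argument only for
// element 1.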
11512 LaneMask = !Lane1Cond << i;
11513 else if (Lane1Cond < 0)
11514 LaneMask = !Lane2Cond << i;
11515 else
11516 return false;
11518 MaskValue |= LaneMask;
11519 if (NumLanes == 2)
11520 MaskValue |= LaneMask << NumElemsInLane;
11521 }
11522 return true;
11523 }
11525 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
11526 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
11527 const X86Subtarget *Subtarget,
11528 SelectionDAG &DAG) {
11529 SDValue Cond = Op.getOperand(0);
11530 SDValue LHS = Op.getOperand(1);
11531 SDValue RHS = Op.getOperand(2);
11532 SDLoc dl(Op);
11533 MVT VT = Op.getSimpleValueType();
11535 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
11536 return SDValue();
11537 auto *CondBV = cast<BuildVectorSDNode>(Cond);
11539 // Only non-legal VSELECTs reach this lowering, convert those into generic
11540 // shuffles and re-use the shuffle lowering path for blends.
11541 SmallVector<int, 32> Mask;
11542 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
11543 SDValue CondElt = CondBV->getOperand(i);
11544 Mask.push_back(
11545 isa<ConstantSDNode>(CondElt) ? i + (isNullConstant(CondElt) ? Size : 0)
11546 : -1);
11547 }
11548 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
11549 }
11551 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
11552 // A vselect where all conditions and data are constants can be optimized into
11553 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
11554 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
11555 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
11556 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
11557 return SDValue();
11559 // Try to lower this to a blend-style vector shuffle. This can handle all
11560 // constant condition cases.
11561 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
11562 return BlendOp;
11564 // Variable blends are only legal from SSE4.1 onward.
11565 if (!Subtarget->hasSSE41())
11566 return SDValue();
11568 // Only some types will be legal on some subtargets. If we can emit a legal
11569 // VSELECT-matching blend, return Op, but if we need to expand, return
11570 // a null value.
11571 switch (Op.getSimpleValueType().SimpleTy) {
11572 default:
11573 // Most of the vector types have blends past SSE4.1.
11574 return Op;
11576 case MVT::v32i8: {
11577 // The byte blends for AVX vectors were introduced only in AVX2.
11578 if (Subtarget->hasAVX2())
11579 return Op;
11581 return SDValue();
11582 }
11583 case MVT::v8i16:
11584 case MVT::v16i16:
11585 // AVX-512 BWI and VLX features support VSELECT with i16 elements.
11586 if (Subtarget->hasBWI() && Subtarget->hasVLX())
11587 return Op;
11589 // FIXME: We should custom lower this by fixing the condition and using i8
11590 // blends.
11591 return SDValue();
11592 }
11593 }
11595 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
11596 MVT VT = Op.getSimpleValueType();
11597 SDLoc dl(Op);
11599 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
11600 return SDValue();
11602 if (VT.getSizeInBits() == 8) {
11603 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
11604 Op.getOperand(0), Op.getOperand(1));
11605 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
11606 DAG.getValueType(VT));
11607 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
11610 if (VT.getSizeInBits() == 16) {
11611 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
11612 if (isNullConstant(Op.getOperand(1)))
11613 return DAG.getNode(
11614 ISD::TRUNCATE, dl, MVT::i16,
11615 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
11616 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
11617 Op.getOperand(1)));
11618 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
11619 Op.getOperand(0), Op.getOperand(1));
11620 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
11621 DAG.getValueType(VT));
11622 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
11625 if (VT == MVT::f32) {
11626 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
11627 // the result back to FR32 register. It's only worth matching if the
11628 // result has a single use which is a store or a bitcast to i32. And in
11629 // the case of a store, it's not worth it if the index is a constant 0,
11630 // because a MOVSSmr can be used instead, which is smaller and faster.
11631 if (!Op.hasOneUse())
11632 return SDValue();
11633 SDNode *User = *Op.getNode()->use_begin();
11634 if ((User->getOpcode() != ISD::STORE ||
11635 isNullConstant(Op.getOperand(1))) &&
11636 (User->getOpcode() != ISD::BITCAST ||
11637 User->getValueType(0) != MVT::i32))
11638 return SDValue();
11639 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
11640 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
11641 Op.getOperand(1));
11642 return DAG.getBitcast(MVT::f32, Extract);
11643 }
11645 if (VT == MVT::i32 || VT == MVT::i64) {
11646 // ExtractPS/pextrq works with constant index.
11647 if (isa<ConstantSDNode>(Op.getOperand(1)))
11648 return Op;
11649 }
11651 return SDValue();
11652 }
11653 /// Extract one bit from mask vector, like v16i1 or v8i1.
11654 /// AVX-512 feature.
11656 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
11657 SDValue Vec = Op.getOperand(0);
11658 SDLoc dl(Op);
11659 MVT VecVT = Vec.getSimpleValueType();
11660 SDValue Idx = Op.getOperand(1);
11661 MVT EltVT = Op.getSimpleValueType();
11663 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
11664 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
11665 "Unexpected vector type in ExtractBitFromMaskVector");
11667 // A variable index can't be handled in mask registers,
11668 // so extend the vector to VR512.
11669 if (!isa<ConstantSDNode>(Idx)) {
11670 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
11671 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
11672 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
11673 ExtVT.getVectorElementType(), Ext, Idx);
11674 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
11677 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
11678 const TargetRegisterClass* rc = getRegClassFor(VecVT);
11679 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
11680 rc = getRegClassFor(MVT::v16i1);
11681 unsigned MaxShift = rc->getSize()*8 - 1;
11682 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
11683 DAG.getConstant(MaxShift - IdxVal, dl, MVT::i8));
11684 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
11685 DAG.getConstant(MaxShift, dl, MVT::i8));
11686 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
11687 DAG.getIntPtrConstant(0, dl));
11691 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
11692 SelectionDAG &DAG) const {
11693 SDLoc dl(Op);
11694 SDValue Vec = Op.getOperand(0);
11695 MVT VecVT = Vec.getSimpleValueType();
11696 SDValue Idx = Op.getOperand(1);
11698 if (Op.getSimpleValueType() == MVT::i1)
11699 return ExtractBitFromMaskVector(Op, DAG);
11701 if (!isa<ConstantSDNode>(Idx)) {
11702 if (VecVT.is512BitVector() ||
11703 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
11704 VecVT.getVectorElementType().getSizeInBits() == 32)) {
11706 MVT MaskEltVT =
11707 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
11708 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
11709 MaskEltVT.getSizeInBits());
11711 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
11712 auto PtrVT = getPointerTy(DAG.getDataLayout());
11713 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
11714 getZeroVector(MaskVT, Subtarget, DAG, dl), Idx,
11715 DAG.getConstant(0, dl, PtrVT));
11716 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
11717 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Perm,
11718 DAG.getConstant(0, dl, PtrVT));
11719 }
11720 return SDValue();
11721 }
11723 // If this is a 256-bit vector result, first extract the 128-bit vector and
11724 // then extract the element from the 128-bit vector.
11725 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
11727 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
11728 // Get the 128-bit vector.
11729 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
11730 MVT EltVT = VecVT.getVectorElementType();
11732 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
11733 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
11735 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
11736 // this can be done with a mask.
11737 IdxVal &= ElemsPerChunk - 1;
11738 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
11739 DAG.getConstant(IdxVal, dl, MVT::i32));
11742 assert(VecVT.is128BitVector() && "Unexpected vector length");
11744 if (Subtarget->hasSSE41())
11745 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
11746 return Res;
11748 MVT VT = Op.getSimpleValueType();
11749 // TODO: handle v16i8.
11750 if (VT.getSizeInBits() == 16) {
11751 SDValue Vec = Op.getOperand(0);
11752 if (isNullConstant(Op.getOperand(1)))
11753 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
11754 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
11755 DAG.getBitcast(MVT::v4i32, Vec),
11756 Op.getOperand(1)));
11757 // Transform it so it matches pextrw which produces a 32-bit result.
11758 MVT EltVT = MVT::i32;
11759 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
11760 Op.getOperand(0), Op.getOperand(1));
11761 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
11762 DAG.getValueType(VT));
11763 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
11766 if (VT.getSizeInBits() == 32) {
11767 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
11768 if (Idx == 0)
11769 return Op;
11771 // SHUFPS the element to the lowest double word, then movss.
11772 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
11773 MVT VVT = Op.getOperand(0).getSimpleValueType();
11774 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
11775 DAG.getUNDEF(VVT), Mask);
11776 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
11777 DAG.getIntPtrConstant(0, dl));
11780 if (VT.getSizeInBits() == 64) {
11781 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
11782 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
11783 // to match extract_elt for f64.
11784 if (isNullConstant(Op.getOperand(1)))
11785 return Op;
11787 // UNPCKHPD the element to the lowest double word, then movsd.
11788 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
11789 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
11790 int Mask[2] = { 1, -1 };
11791 MVT VVT = Op.getOperand(0).getSimpleValueType();
11792 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
11793 DAG.getUNDEF(VVT), Mask);
11794 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
11795 DAG.getIntPtrConstant(0, dl));
11796 }
11798 return SDValue();
11799 }
11801 /// Insert one bit to mask vector, like v16i1 or v8i1.
11802 /// AVX-512 feature.
11804 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
11805 SDLoc dl(Op);
11806 SDValue Vec = Op.getOperand(0);
11807 SDValue Elt = Op.getOperand(1);
11808 SDValue Idx = Op.getOperand(2);
11809 MVT VecVT = Vec.getSimpleValueType();
11811 if (!isa<ConstantSDNode>(Idx)) {
11812 // Non constant index. Extend source and destination,
11813 // insert element and then truncate the result.
11814 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
11815 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
11816 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
11817 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
11818 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
11819 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
11822 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
11823 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
11824 if (IdxVal)
11825 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
11826 DAG.getConstant(IdxVal, dl, MVT::i8));
11827 if (Vec.getOpcode() == ISD::UNDEF)
11828 return EltInVec;
11829 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
11830 }
11832 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11833 SelectionDAG &DAG) const {
11834 MVT VT = Op.getSimpleValueType();
11835 MVT EltVT = VT.getVectorElementType();
11837 if (EltVT == MVT::i1)
11838 return InsertBitToMaskVector(Op, DAG);
11840 SDLoc dl(Op);
11841 SDValue N0 = Op.getOperand(0);
11842 SDValue N1 = Op.getOperand(1);
11843 SDValue N2 = Op.getOperand(2);
11844 if (!isa<ConstantSDNode>(N2))
11845 return SDValue();
11846 auto *N2C = cast<ConstantSDNode>(N2);
11847 unsigned IdxVal = N2C->getZExtValue();
11849 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
11850 // into that, and then insert the subvector back into the result.
11851 if (VT.is256BitVector() || VT.is512BitVector()) {
11852 // With a 256-bit vector, we can insert into the zero element efficiently
11853 // using a blend if we have AVX or AVX2 and the right data type.
11854 if (VT.is256BitVector() && IdxVal == 0) {
11855 // TODO: It is worthwhile to cast integer to floating point and back
11856 // and incur a domain crossing penalty if that's what we'll end up
11857 // doing anyway after extracting to a 128-bit vector.
11858 if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
11859 (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
11860 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
11861 N2 = DAG.getIntPtrConstant(1, dl);
11862 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
11863 }
11864 }
11866 // Get the desired 128-bit vector chunk.
11867 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
11869 // Insert the element into the desired chunk.
11870 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
11871 assert(isPowerOf2_32(NumEltsIn128));
11872 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
11873 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
11875 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
11876 DAG.getConstant(IdxIn128, dl, MVT::i32));
11878 // Insert the changed part back into the bigger vector
11879 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
11881 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
11883 if (Subtarget->hasSSE41()) {
11884 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
11885 unsigned Opc;
11886 if (VT == MVT::v8i16) {
11887 Opc = X86ISD::PINSRW;
11888 } else {
11889 assert(VT == MVT::v16i8);
11890 Opc = X86ISD::PINSRB;
11891 }
11893 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
11894 // argument.
11895 if (N1.getValueType() != MVT::i32)
11896 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
11897 if (N2.getValueType() != MVT::i32)
11898 N2 = DAG.getIntPtrConstant(IdxVal, dl);
11899 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
11902 if (EltVT == MVT::f32) {
11903 // Bits [7:6] of the constant are the source select. This will always be
11904 // zero here. The DAG Combiner may combine an extract_elt index into
11905 // these bits. For example (insert (extract, 3), 2) could be matched by
11906 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
11907 // Bits [5:4] of the constant are the destination select. This is the
11908 // value of the incoming immediate.
11909 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
11910 // combine either bitwise AND or insert of float 0.0 to set these bits.
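// For example (a sketch): insertps $0x20 copies source element 0 (bits
// [7:6] == 0) into destination element 2 (bits [5:4] == 2) with an empty
// zero mask (bits [3:0] == 0).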
11912 bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
11913 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
11914 // If this is an insertion of 32-bits into the low 32-bits of
11915 // a vector, we prefer to generate a blend with immediate rather
11916 // than an insertps. Blends are simpler operations in hardware and so
11917 // will always have equal or better performance than insertps.
11918 // But if optimizing for size and there's a load folding opportunity,
11919 // generate insertps because blendps does not have a 32-bit memory
11921 N2 = DAG.getIntPtrConstant(1, dl);
11922 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
11923 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
11925 N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
11926 // Create this as a scalar to vector.
11927 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
11928 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
11931 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
11932 // PINSR* works with constant index.
11933 return Op;
11934 }
11935 }
11937 if (EltVT == MVT::i8)
11938 return SDValue();
11940 if (EltVT.getSizeInBits() == 16) {
11941 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
11942 // as its second argument.
11943 if (N1.getValueType() != MVT::i32)
11944 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
11945 if (N2.getValueType() != MVT::i32)
11946 N2 = DAG.getIntPtrConstant(IdxVal, dl);
11947 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
11948 }
11949 return SDValue();
11950 }
11952 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
11953 SDLoc dl(Op);
11954 MVT OpVT = Op.getSimpleValueType();
11956 // If this is a 256-bit vector result, first insert into a 128-bit
11957 // vector and then insert into the 256-bit vector.
11958 if (!OpVT.is128BitVector()) {
11959 // Insert into a 128-bit vector.
11960 unsigned SizeFactor = OpVT.getSizeInBits()/128;
11961 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
11962 OpVT.getVectorNumElements() / SizeFactor);
11964 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
11966 // Insert the 128-bit vector.
11967 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
11970 if (OpVT == MVT::v1i64 &&
11971 Op.getOperand(0).getValueType() == MVT::i64)
11972 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
11974 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
11975 assert(OpVT.is128BitVector() && "Expected an SSE type!");
11976 return DAG.getBitcast(
11977 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
11980 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
11981 // a simple subregister reference or explicit instructions to grab
11982 // upper bits of a vector.
11983 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
11984 SelectionDAG &DAG) {
11985 SDLoc dl(Op);
11986 SDValue In = Op.getOperand(0);
11987 SDValue Idx = Op.getOperand(1);
11988 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
11989 MVT ResVT = Op.getSimpleValueType();
11990 MVT InVT = In.getSimpleValueType();
11992 if (Subtarget->hasFp256()) {
11993 if (ResVT.is128BitVector() &&
11994 (InVT.is256BitVector() || InVT.is512BitVector()) &&
11995 isa<ConstantSDNode>(Idx)) {
11996 return Extract128BitVector(In, IdxVal, DAG, dl);
11998 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
11999 isa<ConstantSDNode>(Idx)) {
12000 return Extract256BitVector(In, IdxVal, DAG, dl);
12001 }
12002 }
12004 return SDValue();
12005 }
12006 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
12007 // simple superregister reference or explicit instructions to insert
12008 // the upper bits of a vector.
12009 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
12010 SelectionDAG &DAG) {
12011 if (!Subtarget->hasAVX())
12012 return SDValue();
12014 SDLoc dl(Op);
12015 SDValue Vec = Op.getOperand(0);
12016 SDValue SubVec = Op.getOperand(1);
12017 SDValue Idx = Op.getOperand(2);
12019 if (!isa<ConstantSDNode>(Idx))
12020 return SDValue();
12022 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12023 MVT OpVT = Op.getSimpleValueType();
12024 MVT SubVecVT = SubVec.getSimpleValueType();
12026 // Fold two 16-byte subvector loads into one 32-byte load:
12027 // (insert_subvector (insert_subvector undef, (load addr), 0),
12028 // (load addr + 16), Elts/2)
12029 // --> load32 addr
12030 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
12031 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
12032 OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
12033 auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2));
12034 if (Idx2 && Idx2->getZExtValue() == 0) {
12035 SDValue SubVec2 = Vec.getOperand(1);
12036 // If needed, look through a bitcast to get to the load.
12037 if (SubVec2.getNode() && SubVec2.getOpcode() == ISD::BITCAST)
12038 SubVec2 = SubVec2.getOperand(0);
12040 if (auto *FirstLd = dyn_cast<LoadSDNode>(SubVec2)) {
12041 bool Fast;
12042 unsigned Alignment = FirstLd->getAlignment();
12043 unsigned AS = FirstLd->getAddressSpace();
12044 const X86TargetLowering *TLI = Subtarget->getTargetLowering();
12045 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
12046 OpVT, AS, Alignment, &Fast) && Fast) {
12047 SDValue Ops[] = { SubVec2, SubVec };
12048 if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
12049 return Ld;
12050 }
12051 }
12052 }
12053 }
12055 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
12056 SubVecVT.is128BitVector())
12057 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
12059 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
12060 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
12062 if (OpVT.getVectorElementType() == MVT::i1)
12063 return Insert1BitVector(Op, DAG);
12064 return SDValue();
12065 }
12068 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
12069 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
12070 // one of the above mentioned nodes. It has to be wrapped because otherwise
12071 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
12072 // be used to form an addressing mode. These wrapped nodes will be selected
12073 // into MOV32ri.
12074 SDValue
12075 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
12076 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
12078 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
12079 // global base reg.
12080 unsigned char OpFlag = 0;
12081 unsigned WrapperKind = X86ISD::Wrapper;
12082 CodeModel::Model M = DAG.getTarget().getCodeModel();
12084 if (Subtarget->isPICStyleRIPRel() &&
12085 (M == CodeModel::Small || M == CodeModel::Kernel))
12086 WrapperKind = X86ISD::WrapperRIP;
12087 else if (Subtarget->isPICStyleGOT())
12088 OpFlag = X86II::MO_GOTOFF;
12089 else if (Subtarget->isPICStyleStubPIC())
12090 OpFlag = X86II::MO_PIC_BASE_OFFSET;
12092 auto PtrVT = getPointerTy(DAG.getDataLayout());
12093 SDValue Result = DAG.getTargetConstantPool(
12094 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
12096 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
12097 // With PIC, the address is actually $g + Offset.
12100 DAG.getNode(ISD::ADD, DL, PtrVT,
12101 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = DAG.getTarget().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);

  return Result;
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = DAG.getTarget().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                         false, false, false, 0);

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddressAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = DAG.getTarget().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                                      int64_t Offset, SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
      Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
  CodeModel::Model M = DAG.getTarget().getCodeModel();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                         false, false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
                         DAG.getConstant(Offset, dl, PtrVT));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  }

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
  MFI->setAdjustsStack(true);
  MFI->setHasCalls(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  SDLoc dl(GA);  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
                                    .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  SDLoc dl(GA);

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
                  MachinePointerInfo(Ptr), false, false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // initialexec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or "addl x@indntpoff,%eax" (initial exec)
  // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                                 GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                         false, false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  // Cygwin uses emutls.
  // FIXME: It may be EmulatedTLS-generic also for X86-Android.
  if (Subtarget->isTargetWindowsCygwin())
    return LowerToTLSEmulatedModel(GA, DAG);

  const GlobalValue *GV = GA->getGlobal();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (Subtarget->isTargetELF()) {
    if (DAG.getTarget().Options.EmulatedTLS)
      return LowerToTLSEmulatedModel(GA, DAG);
    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
    switch (model) {
      case TLSModel::GeneralDynamic:
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
        return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
      case TLSModel::LocalDynamic:
        return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
                                           Subtarget->is64Bit());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget->is64Bit(),
                                   DAG.getTarget().getRelocationModel() ==
                                       Reloc::PIC_);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    SDLoc DL(Op);
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);

    // Lowering the machine isd will make sure everything is in the right
    // place.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);

    // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
  }

  if (Subtarget->isTargetKnownWindowsMSVC() ||
      Subtarget->isTargetWindowsGNU()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                                  ; from TEB
    //   mov     ecx, dword [rel _tls_index]: Load index (from C runtime)
    //   mov     rcx, qword [rdx+rcx*8]
    //   mov     eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64bit: gs:0x58
    // Windows 32bit: fs:__tls_array
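    //
    // Concretely (an illustrative walk-through, not taken from the original
    // comments): the per-thread address is computed as
    //   TlsBase = *(TEB + 0x58)             ; ThreadLocalStoragePointer
    //   Slot    = *(TlsBase + _tls_index*8) ; this module's TLS block
    //   Addr    = Slot + tlsvar@SECREL      ; offset of the variable in .tls
    // so a hypothetical variable at .tls offset 0x10 in a module whose
    // _tls_index is 2 lives at *(*(gs:[0x58]) + 16) + 0x10.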
    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
    // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray = Subtarget->is64Bit()
                           ? DAG.getIntPtrConstant(0x58, dl)
                           : (Subtarget->isTargetWindowsGNU()
                                  ? DAG.getIntPtrConstant(0x2C, dl)
                                  : DAG.getExternalSymbol("_tls_array", PtrVT));

    SDValue ThreadPointer =
        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr), false,
                    false, false, 0);

    SDValue res;
    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
      res = ThreadPointer;
    } else {
      // Load the _tls_index variable
      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
      if (Subtarget->is64Bit())
        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
                             MachinePointerInfo(), MVT::i32, false, false,
                             false, 0);
      else
        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo(), false,
                          false, false, 0);

      auto &DL = DAG.getDataLayout();
      SDValue Scale =
          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, PtrVT);
      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);

      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
    }

    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo(), false, false,
                      false, 0);

    // Get the offset of start of .tls section
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}

/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
  // generic ISD nodes haven't. Insert an AND to be safe, it's optimized away
  // during isel.
  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                  DAG.getConstant(VTBits - 1, dl, MVT::i8));
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, dl, MVT::i8))
                       : DAG.getConstant(0, dl, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  }

  // If the shift amount is larger or equal than the width of a part we can't
  // rely on the results of shld/shrd. Insert a test and select the appropriate
  // values for large shift amounts.
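  // For example (an illustrative case, not from the original comments): for a
  // 64-bit SHL_PARTS on a 32-bit target with ShAmt == 40, the SHLD result must
  // be ignored; the selects below instead produce
  //   Hi = ShOpLo << (40 & 31) = ShOpLo << 8   and   Lo = 0,
  // which is the correct i64 result of x << 40.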
  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, dl, MVT::i8));
  SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                             AndNode, DAG.getConstant(0, dl, MVT::i8));

  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
  SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
  SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };

  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  }

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (SrcVT.isVector()) {
    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
      return DAG.getNode(X86ISD::CVTDQ2PD, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                     DAG.getUNDEF(SrcVT)));
    }
    if (SrcVT.getVectorElementType() == MVT::i1) {
      MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
      return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                         DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT, Src));
    }
    return SDValue();
  }

  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Chain = DAG.getStore(
      DAG.getEntryNode(), dl, Op.getOperand(0), StackSlot,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI), false,
      false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits()/8;

  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOLoad, ByteSize, ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
  SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                                    X86ISD::FILD, DL,
                                           Tys, Ops, SrcVT, MMO);

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
    Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
    MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOStore, SSFISize, SSFISize);

    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
                                    Ops, Op.getValueType(), MMO);
    Result = DAG.getLoad(
        Op.getValueType(), DL, Chain, StackSlot,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        false, false, false, 0);
  }

  return Result;
}

// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
                                               SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here it is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
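  /*
     Why this works (a sketch of the arithmetic, for illustration): after the
     punpckldq, the two doubles hold bit patterns 0x43300000'<lo32> and
     0x45300000'<hi32>, i.e. the values 2^52 + lo and 2^84 + hi * 2^32,
     because placing a 32-bit integer in the low mantissa bits of 2^52
     (resp. 2^84) represents exactly that sum. Subtracting c1 = { 2^52, 2^84 }
     leaves { lo, hi * 2^32 }, and the horizontal add produces lo + hi * 2^32,
     the original u64 as a double, with a single rounding at the final add.
  */
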
  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 =
      DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  false, false, false, 16);
  SDValue Unpck1 =
      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);

  SDValue CLod1 =
      DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  false, false, false, 16);
  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);

  SDValue Result;
  if (Subtarget->hasSSE3()) {
    // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getBitcast(MVT::v2f64, Shuffle), Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0, dl));
}

// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
                                   MVT::f64);
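  // (A brief sketch of why the bias trick is correct: 0x4330000000000000 is
  // the double 2^52. OR'ing the zero-extended 32-bit input into the low
  // mantissa bits of that pattern yields the double 2^52 + x exactly, so
  // subtracting the bias below leaves (double)x with no rounding.)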
  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getBitcast(MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0, dl));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(
      ISD::OR, dl, MVT::v2i64,
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
  Or =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));

  // Subtract the bias.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  MVT DestVT = Op.getSimpleValueType();

  if (DestVT.bitsLT(MVT::f64))
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0, dl));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  // DestVT is f64, so the result needs no rounding.
  return Sub;
}

static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //   uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //   uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                               (uint4) 0x53000000, 0xaa);
  // #else
  //   uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //   uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //   float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //   return (float4) lo + fhi;
  //
  // We shouldn't use it when unsafe-fp-math is enabled though: we might later
  // reassociate the two FADDs, and if we do that, the algorithm fails
  // spectacularly (PR24512).
  // FIXME: If we ever have some kind of Machine FMF, this should be marked
  // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
  // there's also the MachineCombiner reassociations happening on Machine IR.
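  //
  // A worked example of the arithmetic (illustrative, not from the original
  // comments): for v = 0x89abcdef, lo has bits 0x4b00cdef, the float
  // 2^23 + 0xcdef, and hi has bits 0x530089ab, the float 2^39 + 0x89ab * 2^16.
  // Then fhi = hi - (2^39 + 2^23) = 0x89ab * 2^16 - 2^23, and
  // lo + fhi = 0xcdef + 0x89ab * 2^16 = (float)v, rounded only once at the
  // final add.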
  if (DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  SDLoc DL(Op);
  SDValue V = Op->getOperand(0);
  MVT VecIntVT = V.getSimpleValueType();
  bool Is128 = VecIntVT == MVT::v4i32;
  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something else than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getSimpleValueType(0))
    return SDValue();

  unsigned NumElts = VecIntVT.getVectorNumElements();
  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");
  assert(NumElts <= 8 && "The size of the constant array must be fixed");

  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16

  // Create the splat vector for 0x4b000000.
  SDValue CstLow = DAG.getConstant(0x4b000000, DL, MVT::i32);
  SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
                           CstLow, CstLow, CstLow, CstLow};
  SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                  makeArrayRef(&CstLowArray[0], NumElts));
  // Create the splat vector for 0x53000000.
  SDValue CstHigh = DAG.getConstant(0x53000000, DL, MVT::i32);
  SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
                            CstHigh, CstHigh, CstHigh, CstHigh};
  SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                   makeArrayRef(&CstHighArray[0], NumElts));

  // Create the right shift.
  SDValue CstShift = DAG.getConstant(16, DL, MVT::i32);
  SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
                             CstShift, CstShift, CstShift, CstShift};
  SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                    makeArrayRef(&CstShiftArray[0], NumElts));
  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);

  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
    //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to its
    // original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
                      VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
    //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
    //                                 (uint4) 0x53000000, 0xaa);
    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
    // High will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
                       VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
  } else {
    SDValue CstMask = DAG.getConstant(0xffff, DL, MVT::i32);
    SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
                                     CstMask, CstMask, CstMask);
    //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);

    //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
  SDValue CstFAdd = DAG.getConstantFP(
      APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, MVT::f32);
  SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
                            CstFAdd, CstFAdd, CstFAdd, CstFAdd};
  SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
                                   makeArrayRef(&CstFAddArray[0], NumElts));

  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue FHigh =
      DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
  //     return (float4) lo + fhi;
  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}

SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  MVT SVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v8i8:
  case MVT::v8i16: {
    MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                       DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
  }
  case MVT::v4i32:
  case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
  case MVT::v16i8:
  case MVT::v16i16:
    assert(Subtarget->hasAVX512());
    return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
                       DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, N0));
  }
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (Op.getSimpleValueType().isVector())
    return lowerUINT_TO_FP_vec(Op, DAG);

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  MVT SrcVT = N0.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  if (Subtarget->hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget->is64Bit()))) {
    // Conversions from unsigned i32 to f32/f64 are legal,
    // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
    return Op;
  }

  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
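  // (For the i32 case this works because the value lands in the low word of
  // the 8-byte slot with an explicit zero high word, so the x87 FILD, which
  // interprets the slot as a signed i64, always sees a nonnegative value.)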
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, dl, PtrVT);
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
      MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(
      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
      Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0, dl);
  SDValue Four = DAG.getIntPtrConstant(4, dl);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
      false, false, false, 4);
  // Extend everything to 80 bits to force it to be done on x87.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
                     DAG.getIntPtrConstant(0, dl));
}

// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an <SDValue(), SDValue()> pair.
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence.
// If lowered to the final integer result we return a <result, SDValue()> pair.
// Otherwise we lower it to a sequence ending with a FIST, return a
// <FIST, StackSlot> pair, and the caller is responsible for loading
// the final integer result from StackSlot.
std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, bool IsReplace) const {
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();
  EVT TheVT = Op.getOperand(0).getValueType();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return std::make_pair(SDValue(), SDValue());
  }

  // If using FIST to compute an unsigned i64, we'll need some fixup
  // to handle values above the maximum signed i64. A FIST is always
  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
  bool UnsignedFixup = !IsSigned &&
                       DstTy == MVT::i64 &&
                       (!Subtarget->is64Bit() ||
                        !isScalarFPTypeInSSEReg(TheVT));

  if (!IsSigned && DstTy != MVT::i64 && !Subtarget->hasAVX512()) {
    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
    // The low 32 bits of the fist result will have the correct uint32 result.
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget->is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->int64 into FISTP64 followed by a load from a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getSizeInBits()/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);

  unsigned Opc;
  switch (DstTy.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.

  if (UnsignedFixup) {
    //
    // Conversion to unsigned i64 is implemented with a select,
    // depending on whether the source value fits in the range
    // of a signed i64. Let Thresh be the FP equivalent of
    // 0x8000000000000000ULL.
    //
    // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
    // FistSrc    = (Value < Thresh) ? Value : (Value - Thresh);
    // Fist-to-mem64 FistSrc
    // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
    // to XOR'ing the high 32 bits with Adjust.
    //
    // Being a power of 2, Thresh is exactly representable in all FP formats.
    // For X87 we'd like to use the smallest FP type for this constant, but
    // for DAG type consistency we have to match the FP operand type.
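    //
    // For example (an illustrative value, not from the original comments):
    // converting Value = 2^63 gives Adjust = 0x80000000 and FistSrc = 0.0;
    // the FIST stores 0, and XOR'ing the high word with Adjust yields
    // 0x8000000000000000, the expected unsigned result.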
    APFloat Thresh(APFloat::IEEEsingle, APInt(32, 0x5f000000));
    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
    bool LosesInfo = false;
    if (TheVT == MVT::f64)
      // The rounding mode is irrelevant as the conversion should be exact.
      Status = Thresh.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
                              &LosesInfo);
    else if (TheVT == MVT::f80)
      Status = Thresh.convert(APFloat::x87DoubleExtended,
                              APFloat::rmNearestTiesToEven, &LosesInfo);

    assert(Status == APFloat::opOK && !LosesInfo &&
           "FP conversion should have been exact");

    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);

    SDValue Cmp = DAG.getSetCC(DL,
                               getSetCCResultType(DAG.getDataLayout(),
                                                  *DAG.getContext(), TheVT),
                               Value, ThreshVal, ISD::SETLT);
    Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
                           DAG.getConstant(0, DL, MVT::i32),
                           DAG.getConstant(0x80000000, DL, MVT::i32));
    SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
    Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
                                              *DAG.getContext(), TheVT),
                       Value, ThreshVal, ISD::SETLT);
    Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
  }

  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(MF, SSFI), false,
                         false, 0);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
                                MachineMemOperand::MOLoad, MemSize, MemSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  }

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
                              MachineMemOperand::MOStore, MemSize, MemSize);

  if (UnsignedFixup) {
    //
    // Insert the FIST, load its result as two i32's,
    // and XOR the high i32 with Adjust.
    //
    SDValue FistOps[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           FistOps, DstTy, MMO);

    SDValue Low32 = DAG.getLoad(MVT::i32, DL, FIST, StackSlot,
                                MachinePointerInfo(),
                                false, false, false, 0);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackSlot,
                                   DAG.getConstant(4, DL, PtrVT));

    SDValue High32 = DAG.getLoad(MVT::i32, DL, FIST, HighAddr,
                                 MachinePointerInfo(),
                                 false, false, false, 0);
    High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);

    if (Subtarget->is64Bit()) {
      // Join High32 and Low32 into a 64-bit result.
      // (High32 << 32) | Low32
      Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
      High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
      High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
                           DAG.getConstant(32, DL, MVT::i8));
      SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
      return std::make_pair(Result, SDValue());
    }

    SDValue ResultOps[] = { Low32, High32 };

    SDValue pair = IsReplace
      ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
      : DAG.getMergeValues(ResultOps, DL);
    return std::make_pair(pair, SDValue());
  }

  // Build the FP_TO_INT*_IN_MEM
  SDValue Ops[] = { Chain, Value, StackSlot };
  SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                         Ops, DstTy, MMO);
  return std::make_pair(FIST, StackSlot);
}

static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget *Subtarget) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
    return DAG.getNode(ISD::ZERO_EXTEND, dl, VT, In);

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpunpcklwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpunpckldq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
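  // For example (illustrative): zero-extending v8i16 [x0..x7] interleaves it
  // with a zero vector, so vpunpcklwd yields [x0,0,x1,0,x2,0,x3,0] and
  // vpunpckhwd yields [x4,0,...,x7,0]; reinterpreted as v4i32, each lane holds
  // the zero-extended value, and the concat produces the full v8i32.
  //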
  if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
      ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
      ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
    return SDValue();

  if (Subtarget->hasInt256())
    return DAG.getNode(X86ISD::VZEXT, dl, VT, In);

  SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
  SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);

  MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
                             VT.getVectorNumElements()/2);

  OpLo = DAG.getBitcast(HVT, OpLo);
  OpHi = DAG.getBitcast(HVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}

static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc DL(Op);
  unsigned int NumElts = VT.getVectorNumElements();
  if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
    return SDValue();

  if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
    return DAG.getNode(X86ISD::VZEXT, DL, VT, In);

  assert(InVT.getVectorElementType() == MVT::i1);
  MVT ExtVT = NumElts == 8 ? MVT::v8i64 : MVT::v16i32;
  SDValue One =
      DAG.getConstant(APInt(ExtVT.getScalarSizeInBits(), 1), DL, ExtVT);
  SDValue Zero =
      DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), DL, ExtVT);

  SDValue V = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
  if (VT.is512BitVector())
    return V;
  return DAG.getNode(X86ISD::VTRUNC, DL, VT, V);
}

static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
                               SelectionDAG &DAG) {
  if (Subtarget->hasFp256())
    if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
      return Res;

  return SDValue();
}

static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
                                SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_AVX512(Op, Subtarget, DAG);

  if (Subtarget->hasFp256())
    if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
      return Res;

  assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
         VT.getVectorNumElements() != SVT.getVectorNumElements());
  return SDValue();
}

static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");

  // Shift LSB to MSB and use VPMOVB2M - SKX.
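  // (Truncation to i1 only keeps bit 0 of each element, so shifting that bit
  // into the sign position lets VPMOVB2M/VPMOVW2M, which collect the sign
  // bits into a mask register, produce the i1 vector directly. E.g. for
  // v16i8, each byte is shifted left by 7 before the conversion.)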
  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
  if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() <= 16 &&
       Subtarget->hasBWI()) ||     // legal, will go to VPMOVB2M, VPMOVW2M
      ((InVT.is256BitVector() || InVT.is128BitVector()) &&
       InVT.getScalarSizeInBits() <= 16 && Subtarget->hasBWI() &&
       Subtarget->hasVLX())) {     // legal, will go to VPMOVB2M, VPMOVW2M
    // Shift packed bytes not supported natively, bitcast to dword
    MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
    SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
                                    DAG.getBitcast(ExtVT, In),
                                    DAG.getConstant(ShiftInx, DL, ExtVT));
    ShiftNode = DAG.getBitcast(InVT, ShiftNode);
    return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
  }
  if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() >= 32 &&
       Subtarget->hasDQI()) ||     // legal, will go to VPMOVD2M, VPMOVQ2M
      ((InVT.is256BitVector() || InVT.is128BitVector()) &&
       InVT.getScalarSizeInBits() >= 32 && Subtarget->hasDQI() &&
       Subtarget->hasVLX())) {     // legal, will go to VPMOVD2M, VPMOVQ2M
    SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
                                    DAG.getConstant(ShiftInx, DL, InVT));
    return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
  }

  // Shift LSB to MSB, extend if necessary and use TESTM.
  unsigned NumElts = InVT.getVectorNumElements();
  if (InVT.getSizeInBits() < 512 &&
      (InVT.getScalarType() == MVT::i8 || InVT.getScalarType() == MVT::i16 ||
       !Subtarget->hasVLX())) {
    assert((NumElts == 8 || NumElts == 16) && "Unexpected vector type.");

    // TESTD/Q should be used (if BW supported we use CVT2MASK above),
    // so vector should be extended to packed dword/qword.
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
    InVT = ExtVT;
    ShiftInx = InVT.getScalarSizeInBits() - 1;
  }

  SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
                                  DAG.getConstant(ShiftInx, DL, InVT));
  return DAG.getNode(X86ISD::TESTM, DL, VT, ShiftNode, ShiftNode);
}

SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  if (VT == MVT::i1) {
    assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
           "Invalid scalar TRUNCATE operation");
    if (InVT.getSizeInBits() >= 32)
      return SDValue();
    In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
  }
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  if (VT.getVectorElementType() == MVT::i1)
    return LowerTruncateVecI1(Op, DAG, Subtarget);

  // vpmovqb/w/d, vpmovdb/w, vpmovwb
  if (Subtarget->hasAVX512()) {
    // word to byte only under BWI
    if (InVT == MVT::v16i16 && !Subtarget->hasBWI()) // v16i16 -> v16i8
      return DAG.getNode(X86ISD::VTRUNC, DL, VT,
                         DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
    return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
  }
  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget->hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getBitcast(MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
                                ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0, DL));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2, DL));
    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
  }
  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget->hasInt256()) {
      In = DAG.getBitcast(MVT::v32i8, In);

      SmallVector<SDValue,32> pshufbMask;
      for (unsigned i = 0; i < 2; ++i) {
        pshufbMask.push_back(DAG.getConstant(0x0, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x1, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x4, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x5, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x8, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x9, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xc, DL, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xd, DL, MVT::i8));
        for (unsigned j = 0; j < 8; ++j)
          pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
      }
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
      In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
      In = DAG.getBitcast(MVT::v4i64, In);

      static const int ShufMask[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
                                ShufMask);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0, DL));
      return DAG.getBitcast(VT, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0, DL));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4, DL));

    OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
    OpHi = DAG.getBitcast(MVT::v16i8, OpHi);

    // The PSHUFB mask:
    static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
                                    -1, -1, -1, -1, -1, -1, -1, -1};
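    // (ShufMask1 selects bytes 0,1 / 4,5 / 8,9 / 12,13, i.e. the low i16 of
    // each i32 lane, packing the four truncated words into the low 8 bytes.)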
    SDValue Undef = DAG.getUNDEF(MVT::v16i8);
    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);

    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);

    // The MOVLHPS Mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getBitcast(MVT::v8i16, res);
  }

  // Handle truncation of V256 to V128 using shuffles.
  if (!VT.is128BitVector() || !InVT.is256BitVector())
    return SDValue();

  assert(Subtarget->hasFp256() && "256-bit vector without AVX!");

  unsigned NumElems = VT.getVectorNumElements();
  MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);

  SmallVector<int, 16> MaskVec(NumElems * 2, -1);
  // Prepare truncation shuffle mask
  for (unsigned i = 0; i != NumElems; ++i)
    MaskVec[i] = i * 2;
  SDValue V = DAG.getVectorShuffle(NVT, DL, DAG.getBitcast(NVT, In),
                                   DAG.getUNDEF(NVT), &MaskVec[0]);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
                     DAG.getIntPtrConstant(0, DL));
}

SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert(!Op.getSimpleValueType().isVector());

  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
    /*IsSigned=*/ true, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (!FIST.getNode())
    return Op;

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), SDLoc(Op),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}

SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
    /*IsSigned=*/ false, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (!FIST.getNode())
    return Op;

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), SDLoc(Op),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}

static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");

  return DAG.getNode(X86ISD::VFPEXT, DL, VT,
                     DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
                                 In, DAG.getUNDEF(SVT)));
}

13602 /// The only differences between FABS and FNEG are the mask and the logic op.
13603 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
13604 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
13605 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
13606 "Wrong opcode for lowering FABS or FNEG.");
13608 bool IsFABS = (Op.getOpcode() == ISD::FABS);
13610 // If this is a FABS and it has an FNEG user, bail out to fold the combination
13611 // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
13620 bool IsF128 = (VT == MVT::f128);
13622 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
13623 // decide if we should generate a 16-byte constant mask when we only need 4 or
  // 8 bytes for the scalar case.

  MVT LogicVT;
  MVT EltVT;
  unsigned NumElts;

  if (VT.isVector()) {
    LogicVT = VT;
    EltVT = VT.getVectorElementType();
    NumElts = VT.getVectorNumElements();
  } else if (IsF128) {
    // SSE instructions are used for optimized f128 logical operations.
    LogicVT = MVT::f128;
    EltVT = MVT::f64;
    NumElts = 2;
  } else {
    // There are no scalar bitwise logical SSE/AVX instructions, so we
    // generate a 16-byte vector constant and logic op even for the scalar case.
    // Using a 16-byte mask allows folding the load of the mask with
    // the logic op, so it can save (~4 bytes) on code size.
    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
    EltVT = (VT == MVT::f64) ? MVT::f64 : MVT::f32;
    NumElts = (VT == MVT::f64) ? 2 : 4;
  }
13649 unsigned EltBits = EltVT.getSizeInBits();
13650 LLVMContext *Context = DAG.getContext();
13651 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt =
    IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
13654 Constant *C = ConstantInt::get(*Context, MaskElt);
13655 C = ConstantVector::getSplat(NumElts, C);
13656 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13657 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
13658 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  SDValue Mask =
      DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
13661 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
13662 false, false, false, Alignment);
13664 SDValue Op0 = Op.getOperand(0);
13665 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
  unsigned LogicOp =
    IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
13668 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
13670 if (VT.isVector() || IsF128)
13671 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
13673 // For the scalar case extend to a 128-bit vector, perform the logic op,
13674 // and extract the scalar result back out.
13675 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
13676 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
13677 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
                     DAG.getIntPtrConstant(0, dl));
}
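// Worked example of the masks built above, for f32 (values only, no code):
//   FABS(x):  x & 0x7FFFFFFF   (FAND with a splat of getSignedMaxValue)
//   FNEG(x):  x ^ 0x80000000   (FXOR with a splat of getSignBit)
//   FNABS(x): x | 0x80000000   (FOR, folding FNEG(FABS(x)) into one op)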
13681 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
13682 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13683 LLVMContext *Context = DAG.getContext();
13684 SDValue Op0 = Op.getOperand(0);
13685 SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
13688 MVT SrcVT = Op1.getSimpleValueType();
13689 bool IsF128 = (VT == MVT::f128);
13691 // If second operand is smaller, extend it first.
13692 if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
13696 // And if it is bigger, shrink it first.
13697 if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1, dl));
    SrcVT = VT;
  }
13702 // At this point the operands and the result should have the same
13703 // type, and that won't be f80 since that is not custom lowered.
13704 assert((VT == MVT::f64 || VT == MVT::f32 || IsF128) &&
13705 "Unexpected type in LowerFCOPYSIGN");
13707 const fltSemantics &Sem =
13708 VT == MVT::f64 ? APFloat::IEEEdouble :
13709 (IsF128 ? APFloat::IEEEquad : APFloat::IEEEsingle);
13710 const unsigned SizeInBits = VT.getSizeInBits();
13712 SmallVector<Constant *, 4> CV(
13713 VT == MVT::f64 ? 2 : (IsF128 ? 1 : 4),
13714 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
13716 // First, clear all bits but the sign bit from the second operand (sign).
13717 CV[0] = ConstantFP::get(*Context,
13718 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
13719 Constant *C = ConstantVector::get(CV);
13720 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
13721 SDValue CPIdx = DAG.getConstantPool(C, PtrVT, 16);
13723 // Perform all logic operations as 16-byte vectors because there are no
13724 // scalar FP logic instructions in SSE. This allows load folding of the
13725 // constants into the logic instructions.
13726 MVT LogicVT = (VT == MVT::f64) ? MVT::v2f64 : (IsF128 ? MVT::f128 : MVT::v4f32);
  SDValue Mask1 =
      DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
13729 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
13730 false, false, false, 16);
13732 Op1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op1);
13733 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op1, Mask1);
13735 // Next, clear the sign bit from the first operand (magnitude).
13736 // If it's a constant, we can clear it here.
13737 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
13738 APFloat APF = Op0CN->getValueAPF();
13739 // If the magnitude is a positive zero, the sign bit alone is enough.
13740 if (APF.isPosZero())
13741 return IsF128 ? SignBit :
13742 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcVT, SignBit,
13743 DAG.getIntPtrConstant(0, dl));
    CV[0] = ConstantFP::get(*Context, APF);
  } else {
    CV[0] = ConstantFP::get(
        *Context,
        APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
  }
  C = ConstantVector::get(CV);
13752 CPIdx = DAG.getConstantPool(C, PtrVT, 16);
  SDValue Val =
      DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
13755 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
13756 false, false, false, 16);
13757 // If the magnitude operand wasn't a constant, we need to AND out the sign.
13758 if (!isa<ConstantFPSDNode>(Op0)) {
    if (!IsF128)
      Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op0);
    Val = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op0, Val);
  }
13763 // OR the magnitude value with the sign bit.
13764 Val = DAG.getNode(X86ISD::FOR, dl, LogicVT, Val, SignBit);
13765 return IsF128 ? Val :
13766 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcVT, Val,
                     DAG.getIntPtrConstant(0, dl));
}
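// In effect, for f64 the sequence above computes (sketch only):
//   SignBit = Op1 &  0x8000...0   ; keep only the sign of the sign operand
//   Mag     = Op0 & ~0x8000...0   ; clear the sign of the magnitude operand
//   Result  = Mag | SignBit       ; AND + ANDN/AND + OR, all on vectors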
13770 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
13771 SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
13775 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
13776 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
13777 DAG.getConstant(1, dl, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, dl, VT));
}
13781 // Check whether an OR'd tree is PTEST-able.
13782 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
13783 SelectionDAG &DAG) {
13784 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
  if (!Subtarget->hasSSE41())
    return SDValue();
  if (!Op->hasOneUse())
    return SDValue();
  SDNode *N = Op.getNode();
  SDLoc DL(N);
13795 SmallVector<SDValue, 8> Opnds;
13796 DenseMap<SDValue, unsigned> VecInMap;
13797 SmallVector<SDValue, 8> VecIns;
13798 EVT VT = MVT::Other;
  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
13802 Opnds.push_back(N->getOperand(0));
13803 Opnds.push_back(N->getOperand(1));
13805 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
13806 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
13807 // BFS traverse all OR'd operands.
13808 if (I->getOpcode() == ISD::OR) {
13809 Opnds.push_back(I->getOperand(0));
13810 Opnds.push_back(I->getOperand(1));
13811 // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }
    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();
13825 SDValue ExtractedFromVec = I->getOperand(0);
13826 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
13827 if (M == VecInMap.end()) {
13828 VT = ExtractedFromVec.getValueType();
13829 // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
13832 // Quit if not the same type.
13833 if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
13836 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
13837 VecIns.push_back(ExtractedFromVec);
    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }
13842 assert((VT.is128BitVector() || VT.is256BitVector()) &&
13843 "Not extracted from 128-/256-bit vector.");
13845 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
13847 for (DenseMap<SDValue, unsigned>::const_iterator
13848 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
13849 // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
  }
13854 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
13856 // Cast all vectors into TestVT for PTEST.
13857 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
13858 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
  // If more than one full vector is evaluated, OR them first before PTEST.
13861 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
13862 // Each iteration will OR 2 nodes and append the result until there is only
13863 // 1 node left, i.e. the final OR'd value of all vectors.
13864 SDValue LHS = VecIns[Slot];
13865 SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }
13869 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                     VecIns.back(), VecIns.back());
}
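// The rewrite recognized above is, roughly (illustrative dag):
//   (or (extractelt %v, 0), (or (extractelt %v, 1), ...)) == 0
//     -> (X86ISD::PTEST %v, %v)   ; ZF = 1 iff %v is all zeros
// and it only fires when every lane of every source vector is OR'd exactly
// once, as enforced by the FullMask check.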
13873 /// \brief return true if \c Op has a use that doesn't just read flags.
13874 static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
13877 SDNode *User = *UI;
13878 unsigned UOpNo = UI.getOperandNo();
13879 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }
13885 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
13894 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
13895 SelectionDAG &DAG) const {
13896 if (Op.getValueType() == MVT::i1) {
13897 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
13898 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
                       DAG.getConstant(0, dl, MVT::i8));
  }
13901 // CF and OF aren't always set the way we want. Determine which
13902 // of these we need.
13903 bool NeedCF = false;
13904 bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
13911 case X86::COND_G: case X86::COND_GE:
13912 case X86::COND_L: case X86::COND_LE:
13913 case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the Overflow flag. If NoSignedWrap is
    // present, that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL: {
      const auto *BinNode = cast<BinaryWithFlagsSDNode>(Op.getNode());
      if (BinNode->Flags.hasNoSignedWrap())
        break;
    }
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
13933 // See if we can use the EFLAGS value from the operand instead of
13934 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
13935 // we prove that the arithmetic won't overflow, we can't use OF or CF.
13936 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
13937 // Emit a CMP with 0, which is the TEST pattern.
13938 //if (Op.getValueType() == MVT::i1)
13939 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
13940 // DAG.getConstant(0, MVT::i1));
13941 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
13944 unsigned Opcode = 0;
13945 unsigned NumOperands = 0;
13947 // Truncate operations may prevent the merge of the SETCC instruction
13948 // and the arithmetic instruction before it. Attempt to truncate the operands
13949 // of the arithmetic instruction and use a reduced bit-width instruction.
13950 bool NeedTruncation = false;
13951 SDValue ArithOp = Op;
13952 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
13953 SDValue Arith = Op->getOperand(0);
13954 // Both the trunc and the arithmetic op need to have one user each.
13955 if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
        default: break;
        case ISD::ADD:
        case ISD::SUB:
        case ISD::AND:
        case ISD::OR:
        case ISD::XOR: {
          NeedTruncation = true;
          ArithOp = Arith;
          break;
        }
      }
  }
13969 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
13970 // which may be the result of a CAST. We use the variable 'Op', which is the
13971 // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
13974 // Due to an isel shortcoming, be conservative if this add is likely to be
13975 // selected as part of a load-modify-store instruction. When the root node
13976 // in a match is a store, isel doesn't know how to remap non-chain non-flag
13977 // uses of other nodes in the match, such as the ADD in this case. This
13978 // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
13984 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
13985 UE = Op.getNode()->use_end(); UI != UE; ++UI)
13986 if (UI->getOpcode() != ISD::CopyToReg &&
13987 UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;
13991 if (ConstantSDNode *C =
13992 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
13993 // An add of one will be selected as an INC.
13994 if (C->isOne() && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }
14000 // An add of negative one (subtract of one) will be selected as a DEC.
14001 if (C->isAllOnesValue() && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }
14008 // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::SHL:
  case ISD::SRL:
14014 // If we have a constant logical shift that's only used in a comparison
14015 // against zero turn it into an equivalent AND. This allows turning it into
14016 // a TEST instruction later.
14017 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14018 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14019 EVT VT = Op.getValueType();
14020 unsigned BitWidth = VT.getSizeInBits();
14021 unsigned ShAmt = Op->getConstantOperandVal(1);
      if (ShAmt >= BitWidth) // Avoid undefined shifts.
        break;
14024 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14025 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14026 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
        break;
14029 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14030 DAG.getConstant(Mask, dl, VT));
      DAG.ReplaceAllUsesWith(Op, New);
      Op = New;
    }
    break;

  case ISD::AND:
14037 // If the primary and result isn't used, don't bother using X86ISD::AND,
14038 // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
14045 // Due to the ISEL shortcoming noted above, be conservative if this op is
14046 // likely to be selected as part of a load-modify-store instruction.
14047 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14048 UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;
14052 // Otherwise use a regular EFLAGS-setting instruction.
14053 switch (ArithOp.getOpcode()) {
14054 default: llvm_unreachable("unexpected operator!");
14055 case ISD::SUB: Opcode = X86ISD::SUB; break;
14056 case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR: {
      if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
        SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
        if (EFLAGS.getNode())
          return EFLAGS;
      }
      Opcode = X86ISD::OR;
      break;
    }
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }
  // If we found that truncation is beneficial, perform the truncation and
  // update its users.
14086 if (NeedTruncation) {
14087 EVT VT = Op.getValueType();
14088 SDValue WideVal = Op->getOperand(0);
14089 EVT WideVT = WideVal.getValueType();
14090 unsigned ConvertedOp = 0;
14091 // Use a target machine opcode to prevent further DAGCombine
14092 // optimizations that may separate the arithmetic operations
14093 // from the setcc node.
    switch (WideVal.getOpcode()) {
    default: break;
14096 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
14097 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
14098 case ISD::AND: ConvertedOp = X86ISD::AND; break;
14099 case ISD::OR: ConvertedOp = X86ISD::OR; break;
    case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
    }

    if (ConvertedOp) {
14104 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14105 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
14106 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
14107 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
14115 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14116 DAG.getConstant(0, dl, Op.getValueType()));
14118 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
14119 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
14121 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
14122 DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}
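// For example, (setcc (and %x, %y), 0, ne) needs no separate TEST: the AND is
// re-emitted above as the flag-producing X86ISD::AND, which isel can select
// as "test %x, %y" when the arithmetic result is unused. Likewise an add of
// +1/-1 becomes INC/DEC so its EFLAGS can be reused. (Illustrative summary.)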
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
14128 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
14129 SDLoc dl, SelectionDAG &DAG) const {
14130 if (isNullConstant(Op1))
14131 return EmitTest(Op0, X86CC, dl, DAG);
14133 assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
14134 "Unexpected comparison operation for MVT::i1 operands");
14136 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
14137 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Do the comparison at i32 if it's smaller, except on Atom.
14139 // This avoids subregister aliasing issues. Keep the smaller reference
14140 // if we're optimizing for size, however, as that'll allow better folding
14141 // of memory operations.
14142 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
14143 !DAG.getMachineFunction().getFunction()->optForMinSize() &&
14144 !Subtarget->isAtom()) {
14145 unsigned ExtendOp =
14146 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
14147 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
    }
14150 // Use SUB instead of CMP to enable CSE between SUB and CMP.
14151 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
    SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
                              Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
14159 /// Convert a comparison if required by the subtarget.
14160 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
14161 SelectionDAG &DAG) const {
14162 // If the subtarget does not support the FUCOMI instruction, floating-point
14163 // comparisons have to be converted.
14164 if (Subtarget->hasCMov() ||
14165 Cmp.getOpcode() != X86ISD::CMP ||
14166 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;
14170 // The instruction selector will select an FUCOM instruction instead of
14171 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
14172 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
14173 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
  SDLoc dl(Cmp);
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
14176 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
14177 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
14178 DAG.getConstant(8, dl, MVT::i8));
14179 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
14181 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
14182 assert(Subtarget->hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
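// On a no-CMOV target the sequence built above ends up roughly as:
//   fucom  %st(1)   ; compare, setting the FPSW condition bits
//   fnstsw %ax      ; store FPSW into AX
//   sahf            ; copy AH into EFLAGS
// (Illustrative; the exact FUCOM variant is chosen later by isel.)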
14186 /// The minimum architected relative accuracy is 2^-12. We need one
14187 /// Newton-Raphson step to have a good float result (24 bits of precision).
14188 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
14189 DAGCombinerInfo &DCI,
14190 unsigned &RefinementSteps,
14191 bool &UseOneConstNR) const {
14192 EVT VT = Op.getValueType();
14193 const char *RecipOp;
14195 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
14196 // TODO: Add support for AVX512 (v16f32).
14197 // It is likely not profitable to do this for f64 because a double-precision
14198 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
14199 // instructions: convert to single, rsqrtss, convert back to double, refine
14200 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
14201 // along with FMA, this could be a throughput win.
  if (VT == MVT::f32 && Subtarget->hasSSE1())
    RecipOp = "sqrtf";
14204 else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
14205 (VT == MVT::v8f32 && Subtarget->hasAVX()))
14206 RecipOp = "vec-sqrtf";
14210 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
  if (!Recips.isEnabled(RecipOp))
    return SDValue();
14214 RefinementSteps = Recips.getRefinementSteps(RecipOp);
14215 UseOneConstNR = false;
  return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
}
14219 /// The minimum architected relative accuracy is 2^-12. We need one
14220 /// Newton-Raphson step to have a good float result (24 bits of precision).
14221 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
14222 DAGCombinerInfo &DCI,
14223 unsigned &RefinementSteps) const {
14224 EVT VT = Op.getValueType();
14225 const char *RecipOp;
14227 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
14228 // TODO: Add support for AVX512 (v16f32).
14229 // It is likely not profitable to do this for f64 because a double-precision
14230 // reciprocal estimate with refinement on x86 prior to FMA requires
14231 // 15 instructions: convert to single, rcpss, convert back to double, refine
14232 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
14233 // along with FMA, this could be a throughput win.
  if (VT == MVT::f32 && Subtarget->hasSSE1())
    RecipOp = "divf";
14236 else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
14237 (VT == MVT::v8f32 && Subtarget->hasAVX()))
14238 RecipOp = "vec-divf";
14242 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
  if (!Recips.isEnabled(RecipOp))
    return SDValue();
14246 RefinementSteps = Recips.getRefinementSteps(RecipOp);
  return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
}
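// For reference, the refinement steps requested by both estimate hooks are
// the standard Newton-Raphson iterations (expanded by the generic combiner):
//   reciprocal: x1 = x0 * (2.0 - d * x0)
//   rsqrt:      x1 = x0 * (1.5 - 0.5 * d * x0 * x0)
// Each step roughly doubles the hardware's ~2^-12 relative accuracy.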
14250 /// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
14252 /// CPU if a division's cost is not at least twice the cost of a multiplication.
14253 /// This is because we still need one division to calculate the reciprocal and
14254 /// then we need two multiplies by that reciprocal as replacements for the
14255 /// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}
14260 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
14261 /// if it's possible.
14262 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
14263 SDLoc dl, SelectionDAG &DAG) const {
14264 SDValue Op0 = And.getOperand(0);
14265 SDValue Op1 = And.getOperand(1);
14266 if (Op0.getOpcode() == ISD::TRUNCATE)
14267 Op0 = Op0.getOperand(0);
14268 if (Op1.getOpcode() == ISD::TRUNCATE)
14269 Op1 = Op1.getOperand(0);
  SDValue LHS, RHS;
  if (Op1.getOpcode() == ISD::SHL)
14273 std::swap(Op0, Op1);
14274 if (Op0.getOpcode() == ISD::SHL) {
14275 if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // sign bits.
14278 unsigned BitWidth = Op0.getValueSizeInBits();
14279 unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        APInt Zeros, Ones;
        DAG.computeKnownBits(Op0, Zeros, Ones);
        if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
          return SDValue();
      }
      LHS = Op1;
      RHS = Op0.getOperand(1);
    }
14289 } else if (Op1.getOpcode() == ISD::Constant) {
14290 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
14291 uint64_t AndRHSVal = AndRHS->getZExtValue();
14292 SDValue AndLHS = Op0;
14294 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
14295 LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }
14299 // Use BT if the immediate can't be encoded in a TEST instruction.
      if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
        LHS = AndLHS;
        RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
      }
    }
14306 if (LHS.getNode()) {
14307 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
14308 // instruction. Since the shift amount is in-range-or-undefined, we know
14309 // that doing a bittest on the i32 value is ok. We extend to i32 because
14310 // the encoding for the i16 version is larger than the i32 version.
14311 // Also promote i16 to i32 for performance / code size reason.
14312 if (LHS.getValueType() == MVT::i8 ||
14313 LHS.getValueType() == MVT::i16)
14314 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
14316 // If the operand types disagree, extend the shift amount to match. Since
14317 // BT ignores high bits (like shifts) we can use anyextend.
14318 if (LHS.getValueType() != RHS.getValueType())
14319 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
14321 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
14322 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
14323 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, dl, MVT::i8), BT);
  }

  return SDValue();
}
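// Examples of the patterns matched above (sketch):
//   (and %x, (shl 1, %n)) != 0  ->  bt %x, %n ; setb
//   (and (srl %x, %n), 1) != 0  ->  bt %x, %n ; setb
// with SETEQ mapping to COND_AE (CF == 0) and SETNE to COND_B (CF == 1).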
/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
/// mask CC.
static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                              SDValue &Op1) {
  unsigned SSECC;
  bool Swap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
14346 switch (SetCCOpcode) {
14347 default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; // Fallthrough
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; // Fallthrough
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
14358 case ISD::SETUO: SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
14361 case ISD::SETULE: Swap = true; // Fallthrough
14362 case ISD::SETUGE: SSECC = 5; break;
14363 case ISD::SETULT: Swap = true; // Fallthrough
14364 case ISD::SETUGT: SSECC = 6; break;
14365 case ISD::SETO: SSECC = 7; break;
  case ISD::SETUEQ:
  case ISD::SETONE: SSECC = 8; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  return SSECC;
}
// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
14377 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
14378 MVT VT = Op.getSimpleValueType();
14380 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
14381 "Unsupported value type for operation");
14383 unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
  SDValue CC = Op.getOperand(2);
14387 // Extract the LHS vectors
14388 SDValue LHS = Op.getOperand(0);
14389 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
14390 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
14392 // Extract the RHS vectors
14393 SDValue RHS = Op.getOperand(1);
14394 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
14395 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
14397 // Issue the operation on the smaller types and concatenate the result back
14398 MVT EltVT = VT.getVectorElementType();
14399 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
14400 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
14401 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
14405 static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
14406 SDValue Op0 = Op.getOperand(0);
14407 SDValue Op1 = Op.getOperand(1);
14408 SDValue CC = Op.getOperand(2);
14409 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  assert(Op0.getSimpleValueType().getVectorElementType() == MVT::i1 &&
14413 "Unexpected type for boolean compare operation");
14414 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
14415 SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0,
14416 DAG.getConstant(-1, dl, VT));
14417 SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1,
14418 DAG.getConstant(-1, dl, VT));
14419 switch (SetCCOpcode) {
14420 default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETEQ:
    // (x == y) -> ~(x ^ y)
14423 return DAG.getNode(ISD::XOR, dl, VT,
14424 DAG.getNode(ISD::XOR, dl, VT, Op0, Op1),
14425 DAG.getConstant(-1, dl, VT));
  case ISD::SETNE:
    // (x != y) -> (x ^ y)
14428 return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1);
  case ISD::SETUGT:
  case ISD::SETGT:
    // (x > y) -> (x & ~y)
14432 return DAG.getNode(ISD::AND, dl, VT, Op0, NotOp1);
  case ISD::SETULT:
  case ISD::SETLT:
    // (x < y) -> (~x & y)
14436 return DAG.getNode(ISD::AND, dl, VT, NotOp0, Op1);
  case ISD::SETULE:
  case ISD::SETLE:
    // (x <= y) -> (~x | y)
14440 return DAG.getNode(ISD::OR, dl, VT, NotOp0, Op1);
  case ISD::SETUGE:
  case ISD::SETGE:
    // (x >= y) -> (x | ~y)
    return DAG.getNode(ISD::OR, dl, VT, Op0, NotOp1);
  }
}
14448 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
14449 const X86Subtarget *Subtarget) {
14450 SDValue Op0 = Op.getOperand(0);
14451 SDValue Op1 = Op.getOperand(1);
14452 SDValue CC = Op.getOperand(2);
14453 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  assert(Op0.getSimpleValueType().getVectorElementType().getSizeInBits() >= 8 &&
14457 Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
14458 "Cannot set masked compare for this operation");
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  unsigned  Opc = 0;
  bool Unsigned = false;
  bool Swap = false;
  unsigned SSECC = 0;
  switch (SetCCOpcode) {
14466 default: llvm_unreachable("Unexpected SETCC condition");
14467 case ISD::SETNE: SSECC = 4; break;
14468 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
14469 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
14470 case ISD::SETLT: Swap = true; //fall-through
14471 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
14472 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
14473 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
14474 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
14475 case ISD::SETULE: Unsigned = true; //fall-through
  case ISD::SETLE: SSECC = 2; break;
  }

  if (Swap)
    std::swap(Op0, Op1);
  if (Opc)
    return DAG.getNode(Opc, dl, VT, Op0, Op1);
14483 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
14484 return DAG.getNode(Opc, dl, VT, Op0, Op1,
                     DAG.getConstant(SSECC, dl, MVT::i8));
}
14488 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
14489 /// operand \p Op1. If non-trivial (for example because it's not constant)
14490 /// return an empty value.
14491 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
{
  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
  if (!BV)
    return SDValue();
14497 MVT VT = Op1.getSimpleValueType();
14498 MVT EVT = VT.getVectorElementType();
14499 unsigned n = VT.getVectorNumElements();
14500 SmallVector<SDValue, 8> ULTOp1;
14502 for (unsigned i = 0; i < n; ++i) {
14503 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EVT)
      return SDValue();
14507 // Avoid underflow.
    APInt Val = Elt->getAPIntValue();
    if (Val == 0)
      return SDValue();

    ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
}
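// Rationale: for an unsigned constant C > 0, (x <u C) == (x <=u C - 1), so
// the build_vector above simply decrements each lane, e.g. <4,4,4,4> becomes
// <3,3,3,3>. Zero lanes bail out above because the decrement would wrap.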
14518 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
14519 SelectionDAG &DAG) {
14520 SDValue Op0 = Op.getOperand(0);
14521 SDValue Op1 = Op.getOperand(1);
14522 SDValue CC = Op.getOperand(2);
14523 MVT VT = Op.getSimpleValueType();
14524 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);

  if (isFP) {
#ifndef NDEBUG
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif

    unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
14535 unsigned Opc = X86ISD::CMPP;
14536 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
14537 assert(VT.getVectorNumElements() <= 16);
      Opc = X86ISD::CMPM;
    }
    // In the two special cases we can't handle, emit two comparisons.
    if (SSECC == 8) {
      unsigned CC0, CC1;
      unsigned CombineOpc;
      if (SetCCOpcode == ISD::SETUEQ) {
        CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
      } else {
        assert(SetCCOpcode == ISD::SETONE);
        CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
      }
14551 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
14552 DAG.getConstant(CC0, dl, MVT::i8));
14553 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
14554 DAG.getConstant(CC1, dl, MVT::i8));
      return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
    }
14557 // Handle all other FP comparisons here.
14558 return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getConstant(SSECC, dl, MVT::i8));
  }
14562 MVT VTOp0 = Op0.getSimpleValueType();
14563 assert(VTOp0 == Op1.getSimpleValueType() &&
14564 "Expected operands with same type!");
14565 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
14566 "Invalid number of packed elements for source and destination!");
14568 if (VT.is128BitVector() && VTOp0.is256BitVector()) {
14569 // On non-AVX512 targets, a vector of MVT::i1 is promoted by the type
14570 // legalizer to a wider vector type. In the case of 'vsetcc' nodes, the
14571 // legalizer firstly checks if the first operand in input to the setcc has
14572 // a legal type. If so, then it promotes the return type to that same type.
14573 // Otherwise, the return type is promoted to the 'next legal type' which,
14574 // for a vector of MVT::i1 is always a 128-bit integer vector type.
14576 // We reach this code only if the following two conditions are met:
14577 // 1. Both return type and operand type have been promoted to wider types
14578 // by the type legalizer.
14579 // 2. The original operand type has been promoted to a 256-bit vector.
14581 // Note that condition 2. only applies for AVX targets.
14582 SDValue NewOp = DAG.getSetCC(dl, VTOp0, Op0, Op1, SetCCOpcode);
    return DAG.getZExtOrTrunc(NewOp, dl, VT);
  }
14586 // The non-AVX512 code below works under the assumption that source and
14587 // destination types are the same.
14588 assert((Subtarget->hasAVX512() || (VT == VTOp0)) &&
14589 "Value types for source and destination must be the same!");
14591 // Break 256-bit integer vector compare into smaller ones.
14592 if (VT.is256BitVector() && !Subtarget->hasInt256())
14593 return Lower256IntVSETCC(Op, DAG);
14595 MVT OpVT = Op1.getSimpleValueType();
14596 if (OpVT.getVectorElementType() == MVT::i1)
14597 return LowerBoolVSETCC_AVX512(Op, DAG);
14599 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
14600 if (Subtarget->hasAVX512()) {
14601 if (Op1.getSimpleValueType().is512BitVector() ||
14602 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
14603 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
14604 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
    // In the AVX-512 architecture, setcc returns a mask with i1 elements,
    // but there is no compare instruction for i8 and i16 elements in KNL.
    // We are not talking about 512-bit operands in this case, these
    // types are illegal.
    if (MaskResult &&
        (OpVT.getVectorElementType().getSizeInBits() < 32 &&
14612 OpVT.getVectorElementType().getSizeInBits() >= 8))
14613 return DAG.getNode(ISD::TRUNCATE, dl, VT,
                         DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
  }
14617 // Lower using XOP integer comparisons.
14618 if ((VT == MVT::v16i8 || VT == MVT::v8i16 ||
14619 VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget->hasXOP()) {
14620 // Translate compare code to XOP PCOM compare mode.
14621 unsigned CmpMode = 0;
14622 switch (SetCCOpcode) {
14623 default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
14632 case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }
14636 // Are we comparing unsigned or signed integers?
14637 unsigned Opc = ISD::isUnsignedIntSetCC(SetCCOpcode)
14638 ? X86ISD::VPCOMU : X86ISD::VPCOM;
14640 return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getConstant(CmpMode, dl, MVT::i8));
  }
14644 // We are handling one of the integer comparisons here. Since SSE only has
14645 // GT and EQ comparisons for integer, swapping operands and multiple
14646 // operations may be required for some comparisons.
  unsigned Opc;
  bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
14649 bool Subus = false;
14651 switch (SetCCOpcode) {
14652 default: llvm_unreachable("Unexpected SETCC condition");
14653 case ISD::SETNE: Invert = true;
14654 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
14655 case ISD::SETLT: Swap = true;
14656 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
14657 case ISD::SETGE: Swap = true;
14658 case ISD::SETLE: Opc = X86ISD::PCMPGT;
14659 Invert = true; break;
14660 case ISD::SETULT: Swap = true;
14661 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
14662 FlipSigns = true; break;
14663 case ISD::SETUGE: Swap = true;
14664 case ISD::SETULE: Opc = X86ISD::PCMPGT;
                    FlipSigns = true; Invert = true; break;
  }
14668 // Special case: Use min/max operations for SETULE/SETUGE
14669 MVT VET = VT.getVectorElementType();
  bool hasMinMax =
       (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
14672 || (Subtarget->hasSSE2() && (VET == MVT::i8));
  if (hasMinMax) {
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETULE: Opc = ISD::UMIN; MinMax = true; break;
    case ISD::SETUGE: Opc = ISD::UMAX; MinMax = true; break;
    }
  }
14681 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
14684 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
14685 if (!MinMax && hasSubus) {
14686 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
14688 // t = psubus Op0, Op1
14689 // pcmpeq t, <0..0>
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETULT: {
14693 // If the comparison is against a constant we can turn this into a
14694 // setule. With psubus, setule does not require a swap. This is
14695 // beneficial because the constant in the register is no longer
14696 // destructed as the destination so it can be hoisted out of a loop.
14697 // Only do this pre-AVX since vpcmp* is no longer destructive.
      if (Subtarget->hasAVX())
        break;
      SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
      if (ULEOp1.getNode()) {
        Op1 = ULEOp1;
        Subus = true; Invert = false; Swap = false;
      }
      break;
    }
14707 // Psubus is better than flip-sign because it requires no inversion.
14708 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
    case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
    }

    if (Subus) {
      Opc = X86ISD::SUBUS;
      FlipSigns = false;
      MinMax = false;
    }
  }

  if (Swap)
    std::swap(Op0, Op1);
14721 // Check that the operation in question is available (most are plain SSE2,
14722 // but PCMPGTQ and PCMPEQQ have different requirements).
14723 if (VT == MVT::v2i64) {
14724 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
14725 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
14727 // First cast everything to the right type.
14728 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
14729 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
14731 // Since SSE has no unsigned integer comparisons, we need to flip the sign
14732 // bits of the inputs before performing those operations. The lower
14733 // compare is always unsigned.
      SDValue SB;
      if (FlipSigns) {
        SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32);
      } else {
14738 SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
14739 SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
14740 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                         Sign, Zero, Sign, Zero);
      }
14743 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
14744 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
14746 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
14747 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
14748 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
14750 // Create masks for only the low parts/high parts of the 64 bit integers.
14751 static const int MaskHi[] = { 1, 1, 3, 3 };
14752 static const int MaskLo[] = { 0, 0, 2, 2 };
14753 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
14754 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
14755 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
14757 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
14758 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);
      return DAG.getBitcast(VT, Result);
    }
14766 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
14767 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
14768 // pcmpeqd + pshufd + pand.
14769 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
14771 // First cast everything to the right type.
14772 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
14773 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
      // Do the compare.
      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
14778 // Make sure the lower and upper halves are both all-ones.
14779 static const int Mask[] = { 1, 0, 3, 2 };
14780 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
14781 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }
  }
14790 // Since SSE has no unsigned integer comparisons, we need to flip the sign
14791 // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl,
                                 VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
  }
14800 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  if (MinMax)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

  if (Subus)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                         getZeroVector(VT, Subtarget, DAG, dl));

  return Result;
}
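// Recap of the sign-flip trick used above (illustrative, v4i32 lanes):
//   x >u y  <=>  (x ^ 0x80000000) >s (y ^ 0x80000000)
// Flipping the sign bit converts the unsigned ordering into the signed
// ordering that PCMPGT implements; Invert then applies any final NOT.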
14816 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
14818 MVT VT = Op.getSimpleValueType();
14820 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
14822 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
14823 && "SetCC type must be 8-bit or 1-bit integer");
14824 SDValue Op0 = Op.getOperand(0);
14825 SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
14829 // Optimize to BT if possible.
14830 // Lower (X & (1 << N)) == 0 to BT(X, N).
14831 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
14832 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
14833 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
14834 isNullConstant(Op1) &&
14835 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
14836 if (SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG)) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }
  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
14845 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
14846 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
14848 // If the input is a setcc, then reuse the input setcc or use a new one with
14849 // the inverted condition.
14850 if (Op0.getOpcode() == X86ISD::SETCC) {
14851 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
14857 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
14858 DAG.getConstant(CCode, dl, MVT::i8),
14859 Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
      return SetCC;
    }
  }
14865 if ((Op0.getValueType() == MVT::i1) && isOneConstant(Op1) &&
14866 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
14868 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
    return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
  }
14872 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
14873 unsigned X86CC = TranslateX86CC(CC, dl, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();
14877 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
14878 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
14879 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
14880 DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS);
  if (VT == MVT::i1)
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
  return SetCC;
}
14886 SDValue X86TargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
14887 SDValue LHS = Op.getOperand(0);
14888 SDValue RHS = Op.getOperand(1);
14889 SDValue Carry = Op.getOperand(2);
14890 SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);
  assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
14894 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
14896 assert(Carry.getOpcode() != ISD::CARRY_FALSE);
14897 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
14898 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry);
14899 return DAG.getNode(X86ISD::SETCC, DL, Op.getValueType(),
                     DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
}
// isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
14904 static bool isX86LogicalCmp(SDValue Op) {
14905 unsigned Opc = Op.getNode()->getOpcode();
14906 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
14909 if (Op.getResNo() == 1 &&
14910 (Opc == X86ISD::ADD ||
14911 Opc == X86ISD::SUB ||
14912 Opc == X86ISD::ADC ||
14913 Opc == X86ISD::SBB ||
14914 Opc == X86ISD::SMUL ||
14915 Opc == X86ISD::UMUL ||
14916 Opc == X86ISD::INC ||
14917 Opc == X86ISD::DEC ||
14918 Opc == X86ISD::OR ||
14919 Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}
14929 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;
14933 SDValue VOp0 = V.getOperand(0);
14934 unsigned InBits = VOp0.getValueSizeInBits();
14935 unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
14939 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
14940 bool addTest = true;
14941 SDValue Cond = Op.getOperand(0);
14942 SDValue Op1 = Op.getOperand(1);
14943 SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op1.getSimpleValueType();
  SDValue CC;
14948 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
14949 // are available or VBLENDV if AVX is available.
14950 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
14951 if (Cond.getOpcode() == ISD::SETCC &&
14952 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
14953 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
14954 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
14955 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
14956 int SSECC = translateX86FSETCC(
14957 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
    if (SSECC != 8) {
      if (Subtarget->hasAVX512()) {
14961 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
14962 DAG.getConstant(SSECC, DL, MVT::i8));
        return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
      }
14966 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
14967 DAG.getConstant(SSECC, DL, MVT::i8));
14969 // If we have AVX, we can use a variable vector select (VBLENDV) instead
14970 // of 3 logic instructions for size savings and potentially speed.
14971 // Unfortunately, there is no scalar form of VBLENDV.
14973 // If either operand is a constant, don't try this. We can expect to
14974 // optimize away at least one of the logic instructions later in that
14975 // case, so that sequence would be faster than a variable blend.
14977 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
14978 // uses XMM0 as the selection register. That may need just as many
      // instructions as the AND/ANDN/OR sequence due to register moves, so
      // don't bother.
14982 if (Subtarget->hasAVX() &&
14983 !isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
14985 // Convert to vectors, do a VSELECT, and convert back to scalar.
14986 // All of the conversions should be optimized away.
14988 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
14989 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
14990 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
14991 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
14993 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
14994 VCmp = DAG.getBitcast(VCmpVT, VCmp);
14996 SDValue VSel = DAG.getNode(ISD::VSELECT, DL, VecVT, VCmp, VOp1, VOp2);
14998 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                           VSel, DAG.getIntPtrConstant(0, DL));
      }
15001 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15002 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }
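  // The scalar fallback above computes, in rough pseudo-asm (sketch only):
  //   cmpss  %mask, %cond0, %cond1  ; all-ones or all-zeros per the SSECC
  //   andps  %t1, %mask, %op1       ; take Op1 where the mask is set
  //   andnps %t2, %mask, %op2       ; take Op2 where the mask is clear
  //   orps   %res, %t1, %t2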
15007 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
    SDValue Op1Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
15010 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
15011 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
15012 Op1Scalar = Op1.getOperand(0);
    SDValue Op2Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
15015 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
15016 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
15017 Op2Scalar = Op2.getOperand(0);
15018 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
15019 SDValue newSelect = DAG.getNode(ISD::SELECT, DL,
15020 Op1Scalar.getValueType(),
15021 Cond, Op1Scalar, Op2Scalar);
15022 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
15023 return DAG.getBitcast(VT, newSelect);
15024 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
15025 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
                         DAG.getIntPtrConstant(0, DL));
    }
  }
15030 if (VT == MVT::v4i1 || VT == MVT::v2i1) {
15031 SDValue zeroConst = DAG.getIntPtrConstant(0, DL);
15032 Op1 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
15033 DAG.getUNDEF(MVT::v8i1), Op1, zeroConst);
15034 Op2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
15035 DAG.getUNDEF(MVT::v8i1), Op2, zeroConst);
    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::v8i1,
                                    Cond, Op1, Op2);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst);
  }
15041 if (Cond.getOpcode() == ISD::SETCC) {
15042 SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }
15047 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15048 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15049 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15050 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
15051 if (Cond.getOpcode() == X86ISD::SETCC &&
15052 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15053 isNullConstant(Cond.getOperand(1).getOperand(1))) {
15054 SDValue Cmp = Cond.getOperand(1);
    unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15058 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
15059 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15060 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
15062 SDValue CmpOp0 = Cmp.getOperand(0);
15063 // Apply further optimizations for special cases
15064 // (select (x != 0), -1, 0) -> neg & sbb
15065 // (select (x == 0), 0, -1) -> neg & sbb
15066 if (isNullConstant(Y) &&
15067 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
15068 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15069 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15070 DAG.getConstant(0, DL,
15071 CmpOp0.getValueType()),
                                  CmpOp0);
        SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15074 DAG.getConstant(X86::COND_B, DL, MVT::i8),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }
15079 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15080 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
15081 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15083 SDValue Res = // Res = 0 or -1.
15084 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15085 DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp);
15087 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
15088 Res = DAG.getNOT(DL, Res, Res.getValueType());
15090 if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);

      return Res;
    }
  }
15096 // Look past (and (setcc_carry (cmp ...)), 1).
15097 if (Cond.getOpcode() == ISD::AND &&
15098 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
15099 isOneConstant(Cond.getOperand(1)))
15100 Cond = Cond.getOperand(0);
15102 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15103 // setting operand in place of the X86ISD::SETCC.
15104 unsigned CondOpcode = Cond.getOpcode();
15105 if (CondOpcode == X86ISD::SETCC ||
15106 CondOpcode == X86ISD::SETCC_CARRY) {
15107 CC = Cond.getOperand(0);
15109 SDValue Cmp = Cond.getOperand(1);
15110 unsigned Opc = Cmp.getOpcode();
15111 MVT VT = Op.getSimpleValueType();
15113 bool IllegalFPCMov = false;
15114 if (VT.isFloatingPoint() && !VT.isVector() &&
15115 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15116 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15118 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
15123 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15124 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15125 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15126 Cond.getOperand(0).getValueType() != MVT::i8)) {
15127 SDValue LHS = Cond.getOperand(0);
15128 SDValue RHS = Cond.getOperand(1);
15129 unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
15133 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15134 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15135 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15136 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15137 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15138 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
15141 if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15147 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
15149 if (CondOpcode == ISD::UMULO)
15150 Cond = X86Op.getValue(2);
15152 Cond = X86Op.getValue(1);
    CC = DAG.getConstant(X86Cond, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
15159 // Look past the truncate if the high bits are known zero.
15160 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15161 Cond = Cond.getOperand(0);
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
15165 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15166 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
15167 CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
15175 CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
  }
15179 // a < b ? -1 : 0 -> RES = ~setcc_carry
15180 // a < b ? 0 : -1 -> RES = setcc_carry
15181 // a >= b ? -1 : 0 -> RES = setcc_carry
15182 // a >= b ? 0 : -1 -> RES = ~setcc_carry
15183 if (Cond.getOpcode() == X86ISD::SUB) {
15184 Cond = ConvertCmpIfNecessary(Cond, DAG);
15185 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
15187 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
15188 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
15189 (isNullConstant(Op1) || isNullConstant(Op2))) {
15190 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, DL, MVT::i8),
                                Cond);
15193 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }
15199 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
15200 // widen the cmov and push the truncate through. This avoids introducing a new
15201 // branch during isel and doesn't add any extensions.
15202 if (Op.getValueType() == MVT::i8 &&
15203 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
15204 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
15205 if (T1.getValueType() == T2.getValueType() &&
15206 // Blacklist CopyFromReg to avoid partial register stalls.
15207 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode() != ISD::CopyFromReg) {
15208 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
15209 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
15210 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
15214 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
15215 // condition is true.
15216 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
15217 SDValue Ops[] = { Op2, Op1, CC, Cond };
15218 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
15221 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
15222 const X86Subtarget *Subtarget,
15223 SelectionDAG &DAG) {
15224 MVT VT = Op->getSimpleValueType(0);
15225 SDValue In = Op->getOperand(0);
15226 MVT InVT = In.getSimpleValueType();
15227 MVT VTElt = VT.getVectorElementType();
15228 MVT InVTElt = InVT.getVectorElementType();
15232 if ((InVTElt == MVT::i1) &&
15233 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
15234 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
15236 ((Subtarget->hasBWI() && VT.is512BitVector() &&
15237 VTElt.getSizeInBits() <= 16)) ||
15239 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
15240 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
15242 ((Subtarget->hasDQI() && VT.is512BitVector() &&
15243 VTElt.getSizeInBits() >= 32))))
15244 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15246 unsigned int NumElts = VT.getVectorNumElements();
15248 if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
15251 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
15252 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
15253 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
15254 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15257 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
15258 MVT ExtVT = NumElts == 8 ? MVT::v8i64 : MVT::v16i32;
15259 SDValue NegOne =
15260 DAG.getConstant(APInt::getAllOnesValue(ExtVT.getScalarSizeInBits()), dl,
15261 ExtVT);
15262 SDValue Zero =
15263 DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), dl, ExtVT);
15265 SDValue V = DAG.getNode(ISD::VSELECT, dl, ExtVT, In, NegOne, Zero);
15266 if (VT.is512BitVector())
15267 return V;
15268 return DAG.getNode(X86ISD::VTRUNC, dl, VT, V);
15271 static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
15272 const X86Subtarget *Subtarget,
15273 SelectionDAG &DAG) {
15274 SDValue In = Op->getOperand(0);
15275 MVT VT = Op->getSimpleValueType(0);
15276 MVT InVT = In.getSimpleValueType();
15277 assert(VT.getSizeInBits() == InVT.getSizeInBits());
15279 MVT InSVT = InVT.getVectorElementType();
15280 assert(VT.getVectorElementType().getSizeInBits() > InSVT.getSizeInBits());
15282 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
15283 return SDValue();
15284 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
15285 return SDValue();
15287 SDLoc dl(Op);
15289 // SSE41 targets can use the pmovsx* instructions directly.
15290 if (Subtarget->hasSSE41())
15291 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15293 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
15294 SDValue Curr = In;
15295 MVT CurrVT = InVT;
15297 // As SRAI is only available on i16/i32 types, we expand only up to i32
15298 // and handle i64 separately.
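// Sketch of a single i8 -> i16 extension step (the loop and shift below build
// the equivalent UNPCKL/VSRAI nodes):
//   t0 = UNPCKL undef, x   ; each source byte lands in the high half of an i16
//   t1 = VSRAI  t0, 8      ; shift back down, replicating the sign bit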
15299 while (CurrVT != VT && CurrVT.getVectorElementType() != MVT::i32) {
15300 Curr = DAG.getNode(X86ISD::UNPCKL, dl, CurrVT, DAG.getUNDEF(CurrVT), Curr);
15301 MVT CurrSVT = MVT::getIntegerVT(CurrVT.getScalarSizeInBits() * 2);
15302 CurrVT = MVT::getVectorVT(CurrSVT, CurrVT.getVectorNumElements() / 2);
15303 Curr = DAG.getBitcast(CurrVT, Curr);
15306 SDValue SignExt = Curr;
15307 if (CurrVT != InVT) {
15308 unsigned SignExtShift =
15309 CurrVT.getVectorElementType().getSizeInBits() - InSVT.getSizeInBits();
15310 SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
15311 DAG.getConstant(SignExtShift, dl, MVT::i8));
15317 if (VT == MVT::v2i64 && CurrVT == MVT::v4i32) {
15318 SDValue Sign = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
15319 DAG.getConstant(31, dl, MVT::i8));
15320 SDValue Ext = DAG.getVectorShuffle(CurrVT, dl, SignExt, Sign, {0, 4, 1, 5});
15321 return DAG.getBitcast(VT, Ext);
15327 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
15328 SelectionDAG &DAG) {
15329 MVT VT = Op->getSimpleValueType(0);
15330 SDValue In = Op->getOperand(0);
15331 MVT InVT = In.getSimpleValueType();
15334 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
15335 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
15337 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
15338 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
15339 (VT != MVT::v16i16 || InVT != MVT::v16i8))
15342 if (Subtarget->hasInt256())
15343 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15345 // Optimize vectors in AVX mode:
15346 // sign extend v8i16 to v8i32 and v4i32 to v4i64.
15349 // Divide the input vector into two parts:
15350 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 };
15351 // use the vpmovsx instruction to extend each half (v4i32 -> v2i64, v8i16 -> v4i32);
15352 // then concat the results back to the original VT.
15354 unsigned NumElems = InVT.getVectorNumElements();
15355 SDValue Undef = DAG.getUNDEF(InVT);
15357 SmallVector<int,8> ShufMask1(NumElems, -1);
15358 for (unsigned i = 0; i != NumElems/2; ++i)
15361 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
15363 SmallVector<int,8> ShufMask2(NumElems, -1);
15364 for (unsigned i = 0; i != NumElems/2; ++i)
15365 ShufMask2[i] = i + NumElems/2;
15367 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
15369 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
15370 VT.getVectorNumElements()/2);
15372 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
15373 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
15375 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
15378 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
15379 // may emit an illegal shuffle but the expansion is still better than scalar
15380 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
15381 // we'll emit a shuffle and an arithmetic shift.
15382 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
15383 // TODO: It is possible to support ZExt by zeroing the undef values during
15384 // the shuffle phase or after the shuffle.
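// Rough shape of the expansion for a sextload <4 x i8> -> <4 x i32> without
// SSE4.1 (illustrative only; value names are hypothetical):
//   t0 = load i32 from the <4 x i8> address    ; one scalar load
//   t1 = scalar_to_vector t0                   ; v4i32, then bitcast to v16i8
//   t2 = sign_extend_vector_inreg t1 -> v4i32  ; expands to a shuffle + VSRAI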
15385 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
15386 SelectionDAG &DAG) {
15387 MVT RegVT = Op.getSimpleValueType();
15388 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
15389 assert(RegVT.isInteger() &&
15390 "We only custom lower integer vector sext loads.");
15392 // Nothing useful we can do without SSE2 shuffles.
15393 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
15395 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
15397 EVT MemVT = Ld->getMemoryVT();
15398 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15399 unsigned RegSz = RegVT.getSizeInBits();
15401 ISD::LoadExtType Ext = Ld->getExtensionType();
15403 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
15404 && "Only anyext and sext are currently implemented.");
15405 assert(MemVT != RegVT && "Cannot extend to the same type");
15406 assert(MemVT.isVector() && "Must load a vector from memory");
15408 unsigned NumElems = RegVT.getVectorNumElements();
15409 unsigned MemSz = MemVT.getSizeInBits();
15410 assert(RegSz > MemSz && "Register size must be greater than the mem size");
15412 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
15413 // The only way in which we have a legal 256-bit vector result but not the
15414 // integer 256-bit operations needed to directly lower a sextload is if we
15415 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
15416 // a 128-bit vector and a normal sign_extend to 256-bits that should get
15417 // correctly legalized. We do this late to allow the canonical form of
15418 // sextload to persist throughout the rest of the DAG combiner -- it wants
15419 // to fold together any extensions it can, and so will fuse a sign_extend
15420 // of an sextload into a sextload targeting a wider value.
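// E.g. (assumed types): a sextload <8 x i16> -> <8 x i32> on AVX1 becomes a
// plain <8 x i16> load followed by a v8i32 sign_extend, each of which is
// legal on its own.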
15421 SDValue Load;
15422 if (MemSz == 128) {
15423 // Just switch this to a normal load.
15424 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
15425 "it must be a legal 128-bit vector "
15427 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
15428 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
15429 Ld->isInvariant(), Ld->getAlignment());
15430 } else {
15431 assert(MemSz < 128 &&
15432 "Can't extend a type wider than 128 bits to a 256 bit vector!");
15433 // Do an sext load to a 128-bit vector type. We want to use the same
15434 // number of elements, but elements half as wide. This will end up being
15435 // recursively lowered by this routine, but will succeed as we definitely
15436 // have all the necessary features if we're using AVX1.
15437 EVT HalfEltVT =
15438 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
15439 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
15440 Load =
15441 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
15442 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
15443 Ld->isNonTemporal(), Ld->isInvariant(),
15444 Ld->getAlignment());
15447 // Replace chain users with the new chain.
15448 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
15449 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
15451 // Finally, do a normal sign-extend to the desired register.
15452 return DAG.getSExtOrTrunc(Load, dl, RegVT);
15455 // All sizes must be a power of two.
15456 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
15457 "Non-power-of-two elements are not custom lowered!");
15459 // Attempt to load the original value using scalar loads.
15460 // Find the largest scalar type that divides the total loaded size.
15461 MVT SclrLoadTy = MVT::i8;
15462 for (MVT Tp : MVT::integer_valuetypes()) {
15463 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
15464 SclrLoadTy = Tp;
15465 }
15466 }
15468 // On 32-bit systems we can't load a 64-bit integer in one go. Try f64 instead.
15469 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
15470 (64 <= MemSz))
15471 SclrLoadTy = MVT::f64;
15473 // Calculate the number of scalar loads that we need to perform
15474 // in order to load our vector from memory.
15475 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
15477 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
15478 "Can only lower sext loads with a single scalar load!");
15480 unsigned loadRegSize = RegSz;
15481 if (Ext == ISD::SEXTLOAD && RegSz >= 256)
15482 loadRegSize = 128;
15484 // Represent our vector as a sequence of elements which are the
15485 // largest scalar that we can load.
15486 EVT LoadUnitVecVT = EVT::getVectorVT(
15487 *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
15489 // Represent the data using the same element type that is stored in
15490 // memory. In practice, we "widen" MemVT.
15491 EVT WideVecVT =
15492 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
15493 loadRegSize / MemVT.getScalarSizeInBits());
15495 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
15496 "Invalid vector type");
15498 // We can't shuffle using an illegal type.
15499 assert(TLI.isTypeLegal(WideVecVT) &&
15500 "We only lower types that form legal widened vector types");
15502 SmallVector<SDValue, 8> Chains;
15503 SDValue Ptr = Ld->getBasePtr();
15504 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl,
15505 TLI.getPointerTy(DAG.getDataLayout()));
15506 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
15508 for (unsigned i = 0; i < NumLoads; ++i) {
15509 // Perform a single load.
15510 SDValue ScalarLoad =
15511 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
15512 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
15513 Ld->getAlignment());
15514 Chains.push_back(ScalarLoad.getValue(1));
15515 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
15516 // another round of DAGCombining.
15518 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
15520 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
15521 ScalarLoad, DAG.getIntPtrConstant(i, dl));
15523 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15526 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
15528 // Bitcast the loaded value to a vector of the original element type, in
15529 // the size of the target vector type.
15530 SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
15531 unsigned SizeRatio = RegSz / MemSz;
15533 if (Ext == ISD::SEXTLOAD) {
15534 // If we have SSE4.1, we can directly emit a VSEXT node.
15535 if (Subtarget->hasSSE41()) {
15536 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
15537 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
15541 // Otherwise we'll use SIGN_EXTEND_VECTOR_INREG to sign extend the lowest
15543 assert(TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, RegVT) &&
15544 "We can't implement a sext load without SIGN_EXTEND_VECTOR_INREG!");
15546 SDValue Shuff = DAG.getSignExtendVectorInReg(SlicedVec, dl, RegVT);
15547 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
15551 // Redistribute the loaded elements into the different locations.
15552 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
15553 for (unsigned i = 0; i != NumElems; ++i)
15554 ShuffleVec[i * SizeRatio] = i;
15556 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
15557 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
15559 // Bitcast to the requested type.
15560 Shuff = DAG.getBitcast(RegVT, Shuff);
15561 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
15565 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
15566 // ISD::OR of two X86ISD::SETCC nodes, each of which has no other use apart
15567 // from the AND / OR.
15568 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
15569 Opc = Op.getOpcode();
15570 if (Opc != ISD::OR && Opc != ISD::AND)
15572 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
15573 Op.getOperand(0).hasOneUse() &&
15574 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
15575 Op.getOperand(1).hasOneUse());
15578 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
15579 // 1 and that the SETCC node has a single use.
15580 static bool isXor1OfSetCC(SDValue Op) {
15581 if (Op.getOpcode() != ISD::XOR)
15583 if (isOneConstant(Op.getOperand(1)))
15584 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
15585 Op.getOperand(0).hasOneUse();
15589 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
15590 bool addTest = true;
15591 SDValue Chain = Op.getOperand(0);
15592 SDValue Cond = Op.getOperand(1);
15593 SDValue Dest = Op.getOperand(2);
15596 bool Inverted = false;
15598 if (Cond.getOpcode() == ISD::SETCC) {
15599 // Check for setcc([su]{add,sub,mul}o == 0).
15600 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
15601 isNullConstant(Cond.getOperand(1)) &&
15602 Cond.getOperand(0).getResNo() == 1 &&
15603 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
15604 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
15605 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
15606 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
15607 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
15608 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
15610 Cond = Cond.getOperand(0);
15612 SDValue NewCond = LowerSETCC(Cond, DAG);
15613 if (NewCond.getNode())
15618 // FIXME: LowerXALUO doesn't handle these!!
15619 else if (Cond.getOpcode() == X86ISD::ADD ||
15620 Cond.getOpcode() == X86ISD::SUB ||
15621 Cond.getOpcode() == X86ISD::SMUL ||
15622 Cond.getOpcode() == X86ISD::UMUL)
15623 Cond = LowerXALUO(Cond, DAG);
15626 // Look past (and (setcc_carry (cmp ...)), 1).
15627 if (Cond.getOpcode() == ISD::AND &&
15628 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
15629 isOneConstant(Cond.getOperand(1)))
15630 Cond = Cond.getOperand(0);
15632 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15633 // setting operand in place of the X86ISD::SETCC.
15634 unsigned CondOpcode = Cond.getOpcode();
15635 if (CondOpcode == X86ISD::SETCC ||
15636 CondOpcode == X86ISD::SETCC_CARRY) {
15637 CC = Cond.getOperand(0);
15639 SDValue Cmp = Cond.getOperand(1);
15640 unsigned Opc = Cmp.getOpcode();
15641 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
15642 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
15646 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
15647 default: break;
15648 case X86::COND_O:
15649 case X86::COND_B:
15650 // These can only come from an arithmetic instruction with overflow,
15651 // e.g. SADDO, UADDO.
15652 Cond = Cond.getNode()->getOperand(1);
15658 CondOpcode = Cond.getOpcode();
15659 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15660 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15661 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15662 Cond.getOperand(0).getValueType() != MVT::i8)) {
15663 SDValue LHS = Cond.getOperand(0);
15664 SDValue RHS = Cond.getOperand(1);
15665 unsigned X86Opcode;
15666 unsigned X86Cond;
15667 SDVTList VTs;
15668 // Keep this in sync with LowerXALUO, otherwise we might create redundant
15669 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
15670 // X86ISD::INC).
15671 switch (CondOpcode) {
15672 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15673 case ISD::SADDO:
15674 if (isOneConstant(RHS)) {
15675 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
15676 break;
15677 }
15678 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15679 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15680 case ISD::SSUBO:
15681 if (isOneConstant(RHS)) {
15682 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
15683 break;
15684 }
15685 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15686 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15687 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15688 default: llvm_unreachable("unexpected overflowing operator");
15689 }
15690 if (Inverted)
15691 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
15692 if (CondOpcode == ISD::UMULO)
15693 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
15694 MVT::i32);
15695 else
15696 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15698 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
15700 if (CondOpcode == ISD::UMULO)
15701 Cond = X86Op.getValue(2);
15702 else
15703 Cond = X86Op.getValue(1);
15705 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
15709 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
15710 SDValue Cmp = Cond.getOperand(0).getOperand(1);
15711 if (CondOpc == ISD::OR) {
15712 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
15713 // two branches instead of an explicit OR instruction with a
15715 if (Cmp == Cond.getOperand(1).getOperand(1) &&
15716 isX86LogicalCmp(Cmp)) {
15717 CC = Cond.getOperand(0).getOperand(0);
15718 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
15719 Chain, Dest, CC, Cmp);
15720 CC = Cond.getOperand(1).getOperand(0);
15724 } else { // ISD::AND
15725 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
15726 // two branches instead of an explicit AND instruction with a
15727 // separate test. However, we only do this if this block doesn't
15728 // have a fall-through edge, because this requires an explicit
15729 // jmp when the condition is false.
15730 if (Cmp == Cond.getOperand(1).getOperand(1) &&
15731 isX86LogicalCmp(Cmp) &&
15732 Op.getNode()->hasOneUse()) {
15733 X86::CondCode CCode =
15734 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
15735 CCode = X86::GetOppositeBranchCondition(CCode);
15736 CC = DAG.getConstant(CCode, dl, MVT::i8);
15737 SDNode *User = *Op.getNode()->use_begin();
15738 // Look for an unconditional branch following this conditional branch.
15739 // We need this because we need to reverse the successors in order
15740 // to implement FCMP_OEQ.
15741 if (User->getOpcode() == ISD::BR) {
15742 SDValue FalseBB = User->getOperand(1);
15744 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
15745 assert(NewBR == User);
15749 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
15750 Chain, Dest, CC, Cmp);
15751 X86::CondCode CCode =
15752 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
15753 CCode = X86::GetOppositeBranchCondition(CCode);
15754 CC = DAG.getConstant(CCode, dl, MVT::i8);
15760 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
15761 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
15762 // It should be transformed by the DAG combiner except when the condition
15763 // is set by an arithmetic-with-overflow node.
15764 X86::CondCode CCode =
15765 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
15766 CCode = X86::GetOppositeBranchCondition(CCode);
15767 CC = DAG.getConstant(CCode, dl, MVT::i8);
15768 Cond = Cond.getOperand(0).getOperand(1);
15770 } else if (Cond.getOpcode() == ISD::SETCC &&
15771 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
15772 // For FCMP_OEQ, we can emit
15773 // two branches instead of an explicit AND instruction with a
15774 // separate test. However, we only do this if this block doesn't
15775 // have a fall-through edge, because this requires an explicit
15776 // jmp when the condition is false.
15777 if (Op.getNode()->hasOneUse()) {
15778 SDNode *User = *Op.getNode()->use_begin();
15779 // Look for an unconditional branch following this conditional branch.
15780 // We need this because we need to reverse the successors in order
15781 // to implement FCMP_OEQ.
15782 if (User->getOpcode() == ISD::BR) {
15783 SDValue FalseBB = User->getOperand(1);
15785 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
15786 assert(NewBR == User);
15790 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
15791 Cond.getOperand(0), Cond.getOperand(1));
15792 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15793 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
15794 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
15795 Chain, Dest, CC, Cmp);
15796 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
15801 } else if (Cond.getOpcode() == ISD::SETCC &&
15802 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
15803 // For FCMP_UNE, we can emit
15804 // two branches instead of an explicit AND instruction with a
15805 // separate test. However, we only do this if this block doesn't
15806 // have a fall-through edge, because this requires an explicit
15807 // jmp when the condition is false.
15808 if (Op.getNode()->hasOneUse()) {
15809 SDNode *User = *Op.getNode()->use_begin();
15810 // Look for an unconditional branch following this conditional branch.
15811 // We need this because we need to reverse the successors in order
15812 // to implement FCMP_UNE.
15813 if (User->getOpcode() == ISD::BR) {
15814 SDValue FalseBB = User->getOperand(1);
15816 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
15817 assert(NewBR == User);
15820 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
15821 Cond.getOperand(0), Cond.getOperand(1));
15822 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15823 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
15824 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
15825 Chain, Dest, CC, Cmp);
15826 CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
15836 // Look past the truncate if the high bits are known zero.
15837 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15838 Cond = Cond.getOperand(0);
15840 // We know the result of AND is compared against zero. Try to match
15841 // it to BT.
15842 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15843 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG)) {
15844 CC = NewSetCC.getOperand(0);
15845 Cond = NewSetCC.getOperand(1);
15852 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
15853 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
15854 Cond = EmitTest(Cond, X86Cond, dl, DAG);
15856 Cond = ConvertCmpIfNecessary(Cond, DAG);
15857 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
15858 Chain, Dest, CC, Cond);
15861 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
15862 // Calls to _alloca are needed to probe the stack when allocating more than 4k
15863 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
15864 // that the guard pages used by the OS virtual memory manager are allocated in
15865 // the correct sequence.
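// Roughly what the Windows path below produces (a sketch; the exact sequence
// depends on the subtarget):
//   mov  eax, <size>       ; allocation size in [ER]AX
//   call __chkstk          ; probes each 4K page so guards fault in order
//   sub  esp, eax          ; commit the allocation (where __chkstk itself
//                          ; does not already adjust the stack pointer)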
15867 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
15868 SelectionDAG &DAG) const {
15869 MachineFunction &MF = DAG.getMachineFunction();
15870 bool SplitStack = MF.shouldSplitStack();
15871 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
15876 SDNode *Node = Op.getNode();
15877 SDValue Chain = Op.getOperand(0);
15878 SDValue Size = Op.getOperand(1);
15879 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
15880 EVT VT = Node->getValueType(0);
15882 // Chain the dynamic stack allocation so that it doesn't modify the stack
15883 // pointer when other instructions are using the stack.
15884 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), dl);
15886 bool Is64Bit = Subtarget->is64Bit();
15887 MVT SPTy = getPointerTy(DAG.getDataLayout());
15891 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15892 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
15893 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
15894 " not tell us which reg is the stack pointer!");
15895 EVT VT = Node->getValueType(0);
15896 SDValue Tmp3 = Node->getOperand(2);
15898 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
15899 Chain = SP.getValue(1);
15900 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
15901 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
15902 unsigned StackAlign = TFI.getStackAlignment();
15903 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
15904 if (Align > StackAlign)
15905 Result = DAG.getNode(ISD::AND, dl, VT, Result,
15906 DAG.getConstant(-(uint64_t)Align, dl, VT));
15907 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
15908 } else if (SplitStack) {
15909 MachineRegisterInfo &MRI = MF.getRegInfo();
15912 // The 64-bit implementation of segmented stacks needs to clobber both r10
15913 // and r11. This makes it impossible to use it along with nested parameters.
15914 const Function *F = MF.getFunction();
15916 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
15918 if (I->hasNestAttr())
15919 report_fatal_error("Cannot use segmented stacks with functions that "
15920 "have nested arguments.");
15923 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
15924 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
15925 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
15926 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
15927 DAG.getRegister(Vreg, SPTy));
15930 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
15932 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
15933 Flag = Chain.getValue(1);
15934 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
15936 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
15938 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
15939 unsigned SPReg = RegInfo->getStackRegister();
15940 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
15941 Chain = SP.getValue(1);
15944 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
15945 DAG.getConstant(-(uint64_t)Align, dl, VT));
15946 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
15952 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
15953 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
15955 SDValue Ops[2] = {Result, Chain};
15956 return DAG.getMergeValues(Ops, dl);
15959 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
15960 MachineFunction &MF = DAG.getMachineFunction();
15961 auto PtrVT = getPointerTy(MF.getDataLayout());
15962 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
15964 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
15967 if (!Subtarget->is64Bit() ||
15968 Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv())) {
15969 // vastart just stores the address of the VarArgsFrameIndex slot into the
15970 // memory location argument.
15971 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
15972 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
15973 MachinePointerInfo(SV), false, false, 0);
15976 // __va_list_tag:
15977 //   gp_offset         (0 - 6 * 8)
15978 //   fp_offset         (48 - 48 + 8 * 16)
15979 //   overflow_arg_area (points to parameters coming in memory)
15980 //   reg_save_area     (points to the saved argument registers)
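// Equivalent C view of one SysV x86-64 __va_list_tag element (for reference):
//   typedef struct {
//     unsigned int gp_offset;   // next GPR arg slot in reg_save_area
//     unsigned int fp_offset;   // next XMM arg slot in reg_save_area
//     void *overflow_arg_area;  // next stack-passed argument
//     void *reg_save_area;      // spilled argument registers
//   } __va_list_tag;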
15981 SmallVector<SDValue, 8> MemOps;
15982 SDValue FIN = Op.getOperand(1);
15984 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
15985 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
15987 FIN, MachinePointerInfo(SV), false, false, 0);
15988 MemOps.push_back(Store);
15991 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
15992 Store = DAG.getStore(Op.getOperand(0), DL,
15993 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL,
15995 FIN, MachinePointerInfo(SV, 4), false, false, 0);
15996 MemOps.push_back(Store);
15998 // Store ptr to overflow_arg_area
15999 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
16000 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
16001 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16002 MachinePointerInfo(SV, 8),
16004 MemOps.push_back(Store);
16006 // Store ptr to reg_save_area.
16007 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
16008 Subtarget->isTarget64BitLP64() ? 8 : 4, DL));
16009 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
16010 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, MachinePointerInfo(
16011 SV, Subtarget->isTarget64BitLP64() ? 16 : 12), false, false, 0);
16012 MemOps.push_back(Store);
16013 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16016 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16017 assert(Subtarget->is64Bit() &&
16018 "LowerVAARG only handles 64-bit va_arg!");
16019 assert(Op.getNode()->getNumOperands() == 4);
16021 MachineFunction &MF = DAG.getMachineFunction();
16022 if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
16023 // The Win64 ABI uses char* instead of a structure.
16024 return DAG.expandVAArg(Op.getNode());
16026 SDValue Chain = Op.getOperand(0);
16027 SDValue SrcPtr = Op.getOperand(1);
16028 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16029 unsigned Align = Op.getConstantOperandVal(3);
16032 EVT ArgVT = Op.getNode()->getValueType(0);
16033 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16034 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
16037 // Decide which area this value should be read from.
16038 // TODO: Implement the AMD64 ABI in its entirety. This simple
16039 // selection mechanism works only for the basic types.
16040 if (ArgVT == MVT::f80) {
16041 llvm_unreachable("va_arg for f80 not yet implemented");
16042 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16043 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16044 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16045 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16047 llvm_unreachable("Unhandled argument type in LowerVAARG");
16050 if (ArgMode == 2) {
16051 // Sanity Check: Make sure using fp_offset makes sense.
16052 assert(!Subtarget->useSoftFloat() &&
16053 !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
16054 Subtarget->hasSSE1());
16057 // Insert VAARG_64 node into the DAG
16058 // VAARG_64 returns two values: Variable Argument Address, Chain
16059 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
16060 DAG.getConstant(ArgMode, dl, MVT::i8),
16061 DAG.getConstant(Align, dl, MVT::i32)};
16062 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
16063 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16064 VTs, InstOps, MVT::i64,
16065 MachinePointerInfo(SV),
16067 /*Volatile=*/false,
16069 /*WriteMem=*/true);
16070 Chain = VAARG.getValue(1);
16072 // Load the next argument and return it
16073 return DAG.getLoad(ArgVT, dl,
16076 MachinePointerInfo(),
16077 false, false, false, 0);
16080 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16081 SelectionDAG &DAG) {
16082 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
16083 // where a va_list is still an i8*.
16084 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
16085 if (Subtarget->isCallingConvWin64(
16086 DAG.getMachineFunction().getFunction()->getCallingConv()))
16087 // Probably a Win64 va_copy.
16088 return DAG.expandVACopy(Op.getNode());
16090 SDValue Chain = Op.getOperand(0);
16091 SDValue DstPtr = Op.getOperand(1);
16092 SDValue SrcPtr = Op.getOperand(2);
16093 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
16094 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
16097 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
16098 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
16100 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
16103 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
16104 // amount is a constant. Takes immediate version of shift as input.
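// E.g. (a sketch): VSHLI <1, 2, undef, 4>, 3 folds below to the constant
// build_vector <8, 16, undef, 32> without emitting any shift at all.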
16105 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
16106 SDValue SrcOp, uint64_t ShiftAmt,
16107 SelectionDAG &DAG) {
16108 MVT ElementType = VT.getVectorElementType();
16110 // Fold this packed shift into its first operand if ShiftAmt is 0.
16111 if (!ShiftAmt)
16112 return SrcOp;
16114 // Check for ShiftAmt >= element width
16115 if (ShiftAmt >= ElementType.getSizeInBits()) {
16116 if (Opc == X86ISD::VSRAI)
16117 ShiftAmt = ElementType.getSizeInBits() - 1;
16118 else
16119 return DAG.getConstant(0, dl, VT);
16122 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
16123 && "Unknown target vector shift-by-constant node");
16125 // Fold this packed vector shift into a build vector if SrcOp is a
16126 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
16127 if (VT == SrcOp.getSimpleValueType() &&
16128 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
16129 SmallVector<SDValue, 8> Elts;
16130 unsigned NumElts = SrcOp->getNumOperands();
16131 ConstantSDNode *ND;
16134 default: llvm_unreachable(nullptr);
16135 case X86ISD::VSHLI:
16136 for (unsigned i=0; i!=NumElts; ++i) {
16137 SDValue CurrentOp = SrcOp->getOperand(i);
16138 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16139 Elts.push_back(CurrentOp);
16142 ND = cast<ConstantSDNode>(CurrentOp);
16143 const APInt &C = ND->getAPIntValue();
16144 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
16147 case X86ISD::VSRLI:
16148 for (unsigned i=0; i!=NumElts; ++i) {
16149 SDValue CurrentOp = SrcOp->getOperand(i);
16150 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16151 Elts.push_back(CurrentOp);
16154 ND = cast<ConstantSDNode>(CurrentOp);
16155 const APInt &C = ND->getAPIntValue();
16156 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
16159 case X86ISD::VSRAI:
16160 for (unsigned i=0; i!=NumElts; ++i) {
16161 SDValue CurrentOp = SrcOp->getOperand(i);
16162 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16163 Elts.push_back(CurrentOp);
16166 ND = cast<ConstantSDNode>(CurrentOp);
16167 const APInt &C = ND->getAPIntValue();
16168 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
16173 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
16176 return DAG.getNode(Opc, dl, VT, SrcOp,
16177 DAG.getConstant(ShiftAmt, dl, MVT::i8));
16180 // getTargetVShiftNode - Handle vector element shifts where the shift amount
16181 // may or may not be a constant. Takes immediate version of shift as input.
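// E.g. (illustrative): a variable i32 amount for VSRLI ends up as the low
// element of a v4i32 build_vector (upper elements zero/undef), is bitcast to
// the 128-bit shift-amount type, and is emitted as the register-form VSRL.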
16182 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
16183 SDValue SrcOp, SDValue ShAmt,
16184 SelectionDAG &DAG) {
16185 MVT SVT = ShAmt.getSimpleValueType();
16186 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
16188 // Catch shift-by-constant.
16189 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
16190 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
16191 CShAmt->getZExtValue(), DAG);
16193 // Change opcode to the non-immediate version.
16194 switch (Opc) {
16195 default: llvm_unreachable("Unknown target vector shift node");
16196 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
16197 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
16198 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
16201 const X86Subtarget &Subtarget =
16202 static_cast<const X86Subtarget &>(DAG.getSubtarget());
16203 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
16204 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
16205 // Let the shuffle legalizer expand this shift amount node.
16206 SDValue Op0 = ShAmt.getOperand(0);
16207 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
16208 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
16210 // Need to build a vector containing the shift amount.
16211 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
16212 SmallVector<SDValue, 4> ShOps;
16213 ShOps.push_back(ShAmt);
16214 if (SVT == MVT::i32) {
16215 ShOps.push_back(DAG.getConstant(0, dl, SVT));
16216 ShOps.push_back(DAG.getUNDEF(SVT));
16218 ShOps.push_back(DAG.getUNDEF(SVT));
16220 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
16221 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
16224 // The return type has to be a 128-bit type with the same element
16225 // type as the input type.
16226 MVT EltVT = VT.getVectorElementType();
16227 MVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
16229 ShAmt = DAG.getBitcast(ShVT, ShAmt);
16230 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
16233 /// \brief Return Mask with the necessary casting or extending
16234 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
16235 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
16236 const X86Subtarget *Subtarget,
16237 SelectionDAG &DAG, SDLoc dl) {
16239 if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
16240 // Mask should be extended
16241 Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
16242 MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
16245 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget->is32Bit()) {
16246 if (MaskVT == MVT::v64i1) {
16247 assert(Subtarget->hasBWI() && "Expected AVX512BW target!");
16248 // In 32-bit mode a bitcast of i64 is illegal; extend/split it instead.
16250 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
16251 DAG.getConstant(0, dl, MVT::i32));
16252 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
16253 DAG.getConstant(1, dl, MVT::i32));
16255 Lo = DAG.getBitcast(MVT::v32i1, Lo);
16256 Hi = DAG.getBitcast(MVT::v32i1, Hi);
16258 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
16260 // MaskVT requires < 64 bits. Truncate the mask (this should succeed in any
16261 // case) and bitcast it to the expected vector type.
16262 MVT TruncVT = MVT::getIntegerVT(MaskVT.getSizeInBits());
16263 return DAG.getBitcast(MaskVT,
16264 DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Mask));
16268 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
16269 Mask.getSimpleValueType().getSizeInBits());
16270 // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
16271 // are extracted by EXTRACT_SUBVECTOR.
16272 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
16273 DAG.getBitcast(BitcastVT, Mask),
16274 DAG.getIntPtrConstant(0, dl));
16278 /// \brief Return (and \p Op, \p Mask) for compare instructions or
16279 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
16280 /// necessary casting or extending for \p Mask when lowering masking intrinsics
16281 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
16282 SDValue PreservedSrc,
16283 const X86Subtarget *Subtarget,
16284 SelectionDAG &DAG) {
16285 MVT VT = Op.getSimpleValueType();
16286 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
16287 unsigned OpcodeSelect = ISD::VSELECT;
16290 if (isAllOnesConstant(Mask))
16293 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
16295 switch (Op.getOpcode()) {
16297 case X86ISD::PCMPEQM:
16298 case X86ISD::PCMPGTM:
16300 case X86ISD::CMPMU:
16301 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
16302 case X86ISD::VFPCLASS:
16303 case X86ISD::VFPCLASSS:
16304 return DAG.getNode(ISD::OR, dl, VT, Op, VMask);
16305 case X86ISD::VTRUNC:
16306 case X86ISD::VTRUNCS:
16307 case X86ISD::VTRUNCUS:
16308 // We can't use ISD::VSELECT here because it is not always "Legal"
16309 // for the destination type. For example, vpmovqb requires only AVX512,
16310 // while a vselect that operates on byte elements requires BWI.
16311 OpcodeSelect = X86ISD::SELECT;
16314 if (PreservedSrc.getOpcode() == ISD::UNDEF)
16315 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
16316 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
16319 /// \brief Creates an SDNode for a predicated scalar operation.
16320 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
16321 /// The mask is coming as MVT::i8 and it should be truncated
16322 /// to MVT::i1 while lowering masking intrinsics.
16323 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
16324 /// "X86select" instead of "vselect". We just can't create the "vselect" node
16325 /// for a scalar instruction.
16326 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
16327 SDValue PreservedSrc,
16328 const X86Subtarget *Subtarget,
16329 SelectionDAG &DAG) {
16330 if (isAllOnesConstant(Mask))
16333 MVT VT = Op.getSimpleValueType();
16335 // The mask should be of type MVT::i1
16336 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
16338 if (Op.getOpcode() == X86ISD::FSETCC)
16339 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
16340 if (Op.getOpcode() == X86ISD::VFPCLASS ||
16341 Op.getOpcode() == X86ISD::VFPCLASSS)
16342 return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
16344 if (PreservedSrc.getOpcode() == ISD::UNDEF)
16345 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
16346 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
16349 static int getSEHRegistrationNodeSize(const Function *Fn) {
16350 if (!Fn->hasPersonalityFn())
16351 report_fatal_error(
16352 "querying registration node size for function without personality");
16353 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
16354 // WinEHStatePass for the full struct definition.
16355 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
16356 case EHPersonality::MSVC_X86SEH: return 24;
16357 case EHPersonality::MSVC_CXX: return 16;
16360 report_fatal_error(
16361 "can only recover FP for 32-bit MSVC EH personality functions");
16364 /// When the MSVC runtime transfers control to us, either to an outlined
16365 /// function or when returning to a parent frame after catching an exception, we
16366 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
16367 /// Here's the math:
16368 /// RegNodeBase = EntryEBP - RegNodeSize
16369 /// ParentFP = RegNodeBase - ParentFrameOffset
16370 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
16371 /// subtracting the offset (negative on x86) takes us back to the parent FP.
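/// Worked example (hypothetical numbers): with EntryEBP = 0x1000 and a 32-bit
/// SEH personality (RegNodeSize = 24), RegNodeBase = 0x1000 - 24 = 0xfe8; with
/// ParentFrameOffset = -0x40, ParentFP = 0xfe8 - (-0x40) = 0x1028.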
16372 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
16373 SDValue EntryEBP) {
16374 MachineFunction &MF = DAG.getMachineFunction();
16377 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16378 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
16380 // It's possible that the parent function no longer has a personality function
16381 // if the exceptional code was optimized away, in which case we just return
16382 // the incoming EBP.
16383 if (!Fn->hasPersonalityFn())
16386 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
16387 // registration, or the .set_setframe offset.
16388 MCSymbol *OffsetSym =
16389 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
16390 GlobalValue::getRealLinkageName(Fn->getName()));
16391 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
16392 SDValue ParentFrameOffset =
16393 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
16395 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
16396 // prologue to RBP in the parent function.
16397 const X86Subtarget &Subtarget =
16398 static_cast<const X86Subtarget &>(DAG.getSubtarget());
16399 if (Subtarget.is64Bit())
16400 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
16402 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
16403 // RegNodeBase = EntryEBP - RegNodeSize
16404 // ParentFP = RegNodeBase - ParentFrameOffset
16405 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
16406 DAG.getConstant(RegNodeSize, dl, PtrVT));
16407 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
16410 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
16411 SelectionDAG &DAG) {
16413 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
16414 MVT VT = Op.getSimpleValueType();
16415 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
16417 switch(IntrData->Type) {
16418 case INTR_TYPE_1OP:
16419 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
16420 case INTR_TYPE_2OP:
16421 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
16423 case INTR_TYPE_2OP_IMM8:
16424 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
16425 DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(2)));
16426 case INTR_TYPE_3OP:
16427 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
16428 Op.getOperand(2), Op.getOperand(3));
16429 case INTR_TYPE_4OP:
16430 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
16431 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
16432 case INTR_TYPE_1OP_MASK_RM: {
16433 SDValue Src = Op.getOperand(1);
16434 SDValue PassThru = Op.getOperand(2);
16435 SDValue Mask = Op.getOperand(3);
16436 SDValue RoundingMode;
16437 // We always add a rounding mode to the node.
16438 // If the rounding mode is not specified, we add the
16439 // "current direction" mode.
16440 if (Op.getNumOperands() == 4)
16442 DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
16444 RoundingMode = Op.getOperand(4);
16445 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
16446 if (IntrWithRoundingModeOpcode != 0)
16447 if (cast<ConstantSDNode>(RoundingMode)->getZExtValue() !=
16448 X86::STATIC_ROUNDING::CUR_DIRECTION)
16449 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
16450 dl, Op.getValueType(), Src, RoundingMode),
16451 Mask, PassThru, Subtarget, DAG);
16452 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
16454 Mask, PassThru, Subtarget, DAG);
16456 case INTR_TYPE_1OP_MASK: {
16457 SDValue Src = Op.getOperand(1);
16458 SDValue PassThru = Op.getOperand(2);
16459 SDValue Mask = Op.getOperand(3);
16460 // We add rounding mode to the Node when
16461 // - RM Opcode is specified and
16462 // - RM is not "current direction".
16463 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
16464 if (IntrWithRoundingModeOpcode != 0) {
16465 SDValue Rnd = Op.getOperand(4);
16466 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
16467 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
16468 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
16469 dl, Op.getValueType(),
16471 Mask, PassThru, Subtarget, DAG);
16474 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
16475 Mask, PassThru, Subtarget, DAG);
16477 case INTR_TYPE_SCALAR_MASK: {
16478 SDValue Src1 = Op.getOperand(1);
16479 SDValue Src2 = Op.getOperand(2);
16480 SDValue passThru = Op.getOperand(3);
16481 SDValue Mask = Op.getOperand(4);
16482 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2),
16483 Mask, passThru, Subtarget, DAG);
16485 case INTR_TYPE_SCALAR_MASK_RM: {
16486 SDValue Src1 = Op.getOperand(1);
16487 SDValue Src2 = Op.getOperand(2);
16488 SDValue Src0 = Op.getOperand(3);
16489 SDValue Mask = Op.getOperand(4);
16490 // There are 2 kinds of intrinsics in this group:
16491 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
16492 // (2) With rounding mode and sae - 7 operands.
16493 if (Op.getNumOperands() == 6) {
16494 SDValue Sae = Op.getOperand(5);
16495 unsigned Opc = IntrData->Opc1 ? IntrData->Opc1 : IntrData->Opc0;
16496 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2,
16498 Mask, Src0, Subtarget, DAG);
16500 assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
16501 SDValue RoundingMode = Op.getOperand(5);
16502 SDValue Sae = Op.getOperand(6);
16503 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
16504 RoundingMode, Sae),
16505 Mask, Src0, Subtarget, DAG);
16507 case INTR_TYPE_2OP_MASK:
16508 case INTR_TYPE_2OP_IMM8_MASK: {
16509 SDValue Src1 = Op.getOperand(1);
16510 SDValue Src2 = Op.getOperand(2);
16511 SDValue PassThru = Op.getOperand(3);
16512 SDValue Mask = Op.getOperand(4);
16514 if (IntrData->Type == INTR_TYPE_2OP_IMM8_MASK)
16515 Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
16517 // We specify 2 possible opcodes for intrinsics with rounding modes.
16518 // First, we check if the intrinsic may have non-default rounding mode,
16519 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
16520 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
16521 if (IntrWithRoundingModeOpcode != 0) {
16522 SDValue Rnd = Op.getOperand(5);
16523 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
16524 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
16525 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
16526 dl, Op.getValueType(),
16528 Mask, PassThru, Subtarget, DAG);
16531 // TODO: Intrinsics should have fast-math-flags to propagate.
16532 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src1,Src2),
16533 Mask, PassThru, Subtarget, DAG);
16535 case INTR_TYPE_2OP_MASK_RM: {
16536 SDValue Src1 = Op.getOperand(1);
16537 SDValue Src2 = Op.getOperand(2);
16538 SDValue PassThru = Op.getOperand(3);
16539 SDValue Mask = Op.getOperand(4);
16540 // We specify 2 possible modes for intrinsics, with/without rounding mode.
16542 // First, we check if the intrinsic has a rounding mode (6 operands);
16543 // if not, we set the rounding mode to "current".
16545 if (Op.getNumOperands() == 6)
16546 Rnd = Op.getOperand(5);
16548 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
16549 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
16551 Mask, PassThru, Subtarget, DAG);
16553 case INTR_TYPE_3OP_SCALAR_MASK_RM: {
16554 SDValue Src1 = Op.getOperand(1);
16555 SDValue Src2 = Op.getOperand(2);
16556 SDValue Src3 = Op.getOperand(3);
16557 SDValue PassThru = Op.getOperand(4);
16558 SDValue Mask = Op.getOperand(5);
16559 SDValue Sae = Op.getOperand(6);
16561 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
16563 Mask, PassThru, Subtarget, DAG);
16565 case INTR_TYPE_3OP_MASK_RM: {
16566 SDValue Src1 = Op.getOperand(1);
16567 SDValue Src2 = Op.getOperand(2);
16568 SDValue Imm = Op.getOperand(3);
16569 SDValue PassThru = Op.getOperand(4);
16570 SDValue Mask = Op.getOperand(5);
16571 // We specify 2 possible modes for intrinsics, with/without rounding mode.
16573 // First, we check if the intrinsic has a rounding mode (7 operands);
16574 // if not, we set the rounding mode to "current".
16576 if (Op.getNumOperands() == 7)
16577 Rnd = Op.getOperand(6);
16579 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
16580 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
16581 Src1, Src2, Imm, Rnd),
16582 Mask, PassThru, Subtarget, DAG);
16584 case INTR_TYPE_3OP_IMM8_MASK:
16585 case INTR_TYPE_3OP_MASK:
16586 case INSERT_SUBVEC: {
16587 SDValue Src1 = Op.getOperand(1);
16588 SDValue Src2 = Op.getOperand(2);
16589 SDValue Src3 = Op.getOperand(3);
16590 SDValue PassThru = Op.getOperand(4);
16591 SDValue Mask = Op.getOperand(5);
16593 if (IntrData->Type == INTR_TYPE_3OP_IMM8_MASK)
16594 Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
16595 else if (IntrData->Type == INSERT_SUBVEC) {
16596 // imm should be adapted to ISD::INSERT_SUBVECTOR behavior
16597 assert(isa<ConstantSDNode>(Src3) && "Expected a ConstantSDNode here!");
16598 unsigned Imm = cast<ConstantSDNode>(Src3)->getZExtValue();
16599 Imm *= Src2.getSimpleValueType().getVectorNumElements();
16600 Src3 = DAG.getTargetConstant(Imm, dl, MVT::i32);
16603 // We specify 2 possible opcodes for intrinsics with rounding modes.
16604 // First, we check if the intrinsic may have non-default rounding mode,
16605 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
16606 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
16607 if (IntrWithRoundingModeOpcode != 0) {
16608 SDValue Rnd = Op.getOperand(6);
16609 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
16610 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
16611 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
16612 dl, Op.getValueType(),
16613 Src1, Src2, Src3, Rnd),
16614 Mask, PassThru, Subtarget, DAG);
16617 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
16619 Mask, PassThru, Subtarget, DAG);
16621 case VPERM_3OP_MASKZ:
16622 case VPERM_3OP_MASK:{
16623 // Src2 is the PassThru
16624 SDValue Src1 = Op.getOperand(1);
16625 SDValue Src2 = Op.getOperand(2);
16626 SDValue Src3 = Op.getOperand(3);
16627 SDValue Mask = Op.getOperand(4);
16628 MVT VT = Op.getSimpleValueType();
16629 SDValue PassThru = SDValue();
16631 // set PassThru element
16632 if (IntrData->Type == VPERM_3OP_MASKZ)
16633 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
16635 PassThru = DAG.getBitcast(VT, Src2);
16637 // Swap Src1 and Src2 in the node creation
16638 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
16639 dl, Op.getValueType(),
16641 Mask, PassThru, Subtarget, DAG);
16645 case FMA_OP_MASK: {
16646 SDValue Src1 = Op.getOperand(1);
16647 SDValue Src2 = Op.getOperand(2);
16648 SDValue Src3 = Op.getOperand(3);
16649 SDValue Mask = Op.getOperand(4);
16650 MVT VT = Op.getSimpleValueType();
16651 SDValue PassThru = SDValue();
16653 // set PassThru element
16654 if (IntrData->Type == FMA_OP_MASKZ)
16655 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
16656 else if (IntrData->Type == FMA_OP_MASK3)
16657 PassThru = Src3;
16658 else
16659 PassThru = Src1;
16661 // We specify 2 possible opcodes for intrinsics with rounding modes.
16662 // First, we check if the intrinsic may have non-default rounding mode,
16663 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
16664 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
16665 if (IntrWithRoundingModeOpcode != 0) {
16666 SDValue Rnd = Op.getOperand(5);
16667 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
16668 X86::STATIC_ROUNDING::CUR_DIRECTION)
16669 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
16670 dl, Op.getValueType(),
16671 Src1, Src2, Src3, Rnd),
16672 Mask, PassThru, Subtarget, DAG);
16674 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
16675 dl, Op.getValueType(),
16677 Mask, PassThru, Subtarget, DAG);
16679 case TERLOG_OP_MASK:
16680 case TERLOG_OP_MASKZ: {
16681 SDValue Src1 = Op.getOperand(1);
16682 SDValue Src2 = Op.getOperand(2);
16683 SDValue Src3 = Op.getOperand(3);
16684 SDValue Src4 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(4));
16685 SDValue Mask = Op.getOperand(5);
16686 MVT VT = Op.getSimpleValueType();
16687 SDValue PassThru = Src1;
16688 // Set PassThru element.
16689 if (IntrData->Type == TERLOG_OP_MASKZ)
16690 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
16692 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
16693 Src1, Src2, Src3, Src4),
16694 Mask, PassThru, Subtarget, DAG);
16697 // FPclass intrinsics with mask
16698 SDValue Src1 = Op.getOperand(1);
16699 MVT VT = Src1.getSimpleValueType();
16700 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
16701 SDValue Imm = Op.getOperand(2);
16702 SDValue Mask = Op.getOperand(3);
16703 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
16704 Mask.getSimpleValueType().getSizeInBits());
16705 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
16706 SDValue FPclassMask = getVectorMaskingNode(FPclass, Mask,
16707 DAG.getTargetConstant(0, dl, MaskVT),
16709 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
16710 DAG.getUNDEF(BitcastVT), FPclassMask,
16711 DAG.getIntPtrConstant(0, dl));
16712 return DAG.getBitcast(Op.getValueType(), Res);
    }
    case FPCLASSS: {
      SDValue Src1 = Op.getOperand(1);
16716 SDValue Imm = Op.getOperand(2);
16717 SDValue Mask = Op.getOperand(3);
16718 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Imm);
16719 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask,
16720 DAG.getTargetConstant(0, dl, MVT::i1), Subtarget, DAG);
16721 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i8, FPclassMask);
    }
    case CMP_MASK:
    case CMP_MASK_CC: {
16725 // Comparison intrinsics with masks.
16726 // Example of transformation:
16727 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
      //             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
      // (i8 (bitcast
      //   (v8i1 (insert_subvector undef,
16731 // (v2i1 (and (PCMPEQM %a, %b),
16732 // (extract_subvector
16733 // (v8i1 (bitcast %mask)), 0))), 0))))
16734 MVT VT = Op.getOperand(1).getSimpleValueType();
16735 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
16736 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
16737 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
16738 Mask.getSimpleValueType().getSizeInBits());
      SDValue Cmp;
      if (IntrData->Type == CMP_MASK_CC) {
16741 SDValue CC = Op.getOperand(3);
16742 CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
16743 // We specify 2 possible opcodes for intrinsics with rounding modes.
16744 // First, we check if the intrinsic may have non-default rounding mode,
16745 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
16746 if (IntrData->Opc1 != 0) {
16747 SDValue Rnd = Op.getOperand(5);
16748 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
16749 X86::STATIC_ROUNDING::CUR_DIRECTION)
16750 Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
                              Op.getOperand(2), CC, Rnd);
        }
        // Default rounding mode.
        if (!Cmp.getNode())
          Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                            Op.getOperand(2), CC);
      } else {
        assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
        Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2));
      }
      SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
                                             DAG.getTargetConstant(0, dl,
                                                                   MaskVT),
                                             Subtarget, DAG);
16767 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
16768 DAG.getUNDEF(BitcastVT), CmpMask,
16769 DAG.getIntPtrConstant(0, dl));
16770 return DAG.getBitcast(Op.getValueType(), Res);
16772 case CMP_MASK_SCALAR_CC: {
16773 SDValue Src1 = Op.getOperand(1);
16774 SDValue Src2 = Op.getOperand(2);
16775 SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
16776 SDValue Mask = Op.getOperand(4);
      SDValue Cmp;
      if (IntrData->Opc1 != 0) {
16780 SDValue Rnd = Op.getOperand(5);
16781 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
16782 X86::STATIC_ROUNDING::CUR_DIRECTION)
          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::i1, Src1, Src2, CC, Rnd);
      }
      // Default rounding mode.
      if (!Cmp.getNode())
        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Src2, CC);

      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask,
                                             DAG.getTargetConstant(0, dl,
                                                                   MVT::i1),
                                             Subtarget, DAG);
16794 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i8,
16795 DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, CmpMask),
16796 DAG.getValueType(MVT::i1));
    }
    case COMI: { // Comparison intrinsics
16799 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
16800 SDValue LHS = Op.getOperand(1);
16801 SDValue RHS = Op.getOperand(2);
16802 unsigned X86CC = TranslateX86CC(CC, dl, true, LHS, RHS, DAG);
16803 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
16804 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
16805 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16806 DAG.getConstant(X86CC, dl, MVT::i8), Cond);
16807 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case COMI_RM: { // Comparison intrinsics with Sae
16810 SDValue LHS = Op.getOperand(1);
16811 SDValue RHS = Op.getOperand(2);
16812 SDValue CC = Op.getOperand(3);
16813 SDValue Sae = Op.getOperand(4);
16814 auto ComiType = TranslateX86ConstCondToX86CC(CC);
16815 // choose between ordered and unordered (comi/ucomi)
16816 unsigned comiOp = std::get<0>(ComiType) ? IntrData->Opc0 : IntrData->Opc1;
      SDValue Cond;
      if (cast<ConstantSDNode>(Sae)->getZExtValue() !=
16819 X86::STATIC_ROUNDING::CUR_DIRECTION)
16820 Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS, Sae);
      else
        Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS);
16823 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16824 DAG.getConstant(std::get<1>(ComiType), dl, MVT::i8), Cond);
16825 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), DAG);
    case VSHIFT_MASK:
      return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
                                                      Op.getSimpleValueType(),
                                                      Op.getOperand(1),
                                                      Op.getOperand(2), DAG),
                                  Op.getOperand(4), Op.getOperand(3), Subtarget,
                                  DAG);
16837 case COMPRESS_EXPAND_IN_REG: {
16838 SDValue Mask = Op.getOperand(3);
16839 SDValue DataToCompress = Op.getOperand(1);
16840 SDValue PassThru = Op.getOperand(2);
16841 if (isAllOnesConstant(Mask)) // return data as is
16842 return Op.getOperand(1);
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
                                              DataToCompress),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case BROADCASTM: {
      SDValue Mask = Op.getOperand(1);
16850 MVT MaskVT = MVT::getVectorVT(MVT::i1,
16851 Mask.getSimpleValueType().getSizeInBits());
16852 Mask = DAG.getBitcast(MaskVT, Mask);
16853 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
    }
    case BLEND: {
      SDValue Mask = Op.getOperand(3);
16857 MVT VT = Op.getSimpleValueType();
16858 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
16859 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
                         Op.getOperand(2));
    }
    case KUNPCK: {
      MVT VT = Op.getSimpleValueType();
16865 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
16867 SDValue Src1 = getMaskNode(Op.getOperand(1), MaskVT, Subtarget, DAG, dl);
16868 SDValue Src2 = getMaskNode(Op.getOperand(2), MaskVT, Subtarget, DAG, dl);
16869 // Arguments should be swapped.
16870 SDValue Res = DAG.getNode(IntrData->Opc0, dl,
                                MVT::getVectorVT(MVT::i1, VT.getSizeInBits()),
                                Src2, Src1);
      return DAG.getBitcast(VT, Res);
    }
    case CONVERT_TO_MASK: {
16876 MVT SrcVT = Op.getOperand(1).getSimpleValueType();
16877 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
16878 MVT BitcastVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits());
      SDValue CvtMask = DAG.getNode(IntrData->Opc0, dl, MaskVT,
                                    Op.getOperand(1));
16882 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
16883 DAG.getUNDEF(BitcastVT), CvtMask,
16884 DAG.getIntPtrConstant(0, dl));
16885 return DAG.getBitcast(Op.getValueType(), Res);
    }
    case CONVERT_MASK_TO_VEC: {
16888 SDValue Mask = Op.getOperand(1);
16889 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
16890 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
16891 return DAG.getNode(IntrData->Opc0, dl, VT, VMask);
    }
    case BRCST_SUBVEC_TO_VEC: {
16894 SDValue Src = Op.getOperand(1);
16895 SDValue Passthru = Op.getOperand(2);
16896 SDValue Mask = Op.getOperand(3);
16897 EVT resVT = Passthru.getValueType();
16898 SDValue subVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, resVT,
16899 DAG.getUNDEF(resVT), Src,
16900 DAG.getIntPtrConstant(0, dl));
      SDValue immVal;
      if (Src.getSimpleValueType().is256BitVector() && resVT.is512BitVector())
        immVal = DAG.getConstant(0x44, dl, MVT::i8);
      else
        immVal = DAG.getConstant(0, dl, MVT::i8);
16906 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
16907 subVec, subVec, immVal),
                                  Mask, Passthru, Subtarget, DAG);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
16918 case Intrinsic::x86_avx2_permd:
16919 case Intrinsic::x86_avx2_permps:
16920 // Operands intentionally swapped. Mask is last operand to intrinsic,
16921 // but second operand for node/instruction.
16922 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
16923 Op.getOperand(2), Op.getOperand(1));
16925 // ptest and testp intrinsics. The intrinsic these come from are designed to
16926 // return an integer value, not just an instruction so lower it to the ptest
16927 // or testp pattern and a setcc for the result.
16928 case Intrinsic::x86_sse41_ptestz:
16929 case Intrinsic::x86_sse41_ptestc:
16930 case Intrinsic::x86_sse41_ptestnzc:
16931 case Intrinsic::x86_avx_ptestz_256:
16932 case Intrinsic::x86_avx_ptestc_256:
16933 case Intrinsic::x86_avx_ptestnzc_256:
16934 case Intrinsic::x86_avx_vtestz_ps:
16935 case Intrinsic::x86_avx_vtestc_ps:
16936 case Intrinsic::x86_avx_vtestnzc_ps:
16937 case Intrinsic::x86_avx_vtestz_pd:
16938 case Intrinsic::x86_avx_vtestc_pd:
16939 case Intrinsic::x86_avx_vtestnzc_pd:
16940 case Intrinsic::x86_avx_vtestz_ps_256:
16941 case Intrinsic::x86_avx_vtestc_ps_256:
16942 case Intrinsic::x86_avx_vtestnzc_ps_256:
16943 case Intrinsic::x86_avx_vtestz_pd_256:
16944 case Intrinsic::x86_avx_vtestc_pd_256:
16945 case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
16950 case Intrinsic::x86_avx_vtestz_ps:
16951 case Intrinsic::x86_avx_vtestz_pd:
16952 case Intrinsic::x86_avx_vtestz_ps_256:
16953 case Intrinsic::x86_avx_vtestz_pd_256:
16954 IsTestPacked = true; // Fallthrough
16955 case Intrinsic::x86_sse41_ptestz:
16956 case Intrinsic::x86_avx_ptestz_256:
      X86CC = X86::COND_E;
      break;
16960 case Intrinsic::x86_avx_vtestc_ps:
16961 case Intrinsic::x86_avx_vtestc_pd:
16962 case Intrinsic::x86_avx_vtestc_ps_256:
16963 case Intrinsic::x86_avx_vtestc_pd_256:
16964 IsTestPacked = true; // Fallthrough
16965 case Intrinsic::x86_sse41_ptestc:
16966 case Intrinsic::x86_avx_ptestc_256:
      X86CC = X86::COND_B;
      break;
16970 case Intrinsic::x86_avx_vtestnzc_ps:
16971 case Intrinsic::x86_avx_vtestnzc_pd:
16972 case Intrinsic::x86_avx_vtestnzc_ps_256:
16973 case Intrinsic::x86_avx_vtestnzc_pd_256:
16974 IsTestPacked = true; // Fallthrough
16975 case Intrinsic::x86_sse41_ptestnzc:
16976 case Intrinsic::x86_avx_ptestnzc_256:
      X86CC = X86::COND_A;
      break;
    }

16982 SDValue LHS = Op.getOperand(1);
16983 SDValue RHS = Op.getOperand(2);
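    // PTEST/TESTP set ZF = 1 when (LHS & RHS) == 0 and CF = 1 when
    // (~LHS & RHS) == 0. The *z, *c and *nzc intrinsic variants therefore
    // map to COND_E, COND_B and COND_A respectively, and the SETCC below
    // extracts the corresponding flag.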
16984 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
16985 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
16986 SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
16987 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
16990 case Intrinsic::x86_avx512_kortestz_w:
16991 case Intrinsic::x86_avx512_kortestc_w: {
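    // KORTEST sets ZF if the OR of the two mask operands is all zeros and
    // CF if it is all ones, so kortestz tests COND_E and kortestc tests
    // COND_B.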
    unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w) ?
                     X86::COND_E : X86::COND_B;
16993 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
16994 SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
16995 SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
16996 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
16997 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
16998 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistria128:
17002 case Intrinsic::x86_sse42_pcmpestria128:
17003 case Intrinsic::x86_sse42_pcmpistric128:
17004 case Intrinsic::x86_sse42_pcmpestric128:
17005 case Intrinsic::x86_sse42_pcmpistrio128:
17006 case Intrinsic::x86_sse42_pcmpestrio128:
17007 case Intrinsic::x86_sse42_pcmpistris128:
17008 case Intrinsic::x86_sse42_pcmpestris128:
17009 case Intrinsic::x86_sse42_pcmpistriz128:
17010 case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17057 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17058 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17059 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17060 DAG.getConstant(X86CC, dl, MVT::i8),
17061 SDValue(PCMP.getNode(), 1));
17062 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
17066 case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;
17073 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17074 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17075 return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  case Intrinsic::x86_seh_lsda: {
17079 // Compute the symbol for the LSDA. We know it'll get emitted later.
17080 MachineFunction &MF = DAG.getMachineFunction();
17081 SDValue Op1 = Op.getOperand(1);
17082 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
17083 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
17084 GlobalValue::getRealLinkageName(Fn->getName()));
17086 // Generate a simple absolute symbol reference. This intrinsic is only
17087 // supported on 32-bit Windows, which isn't PIC.
17088 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
17089 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
  }

  case Intrinsic::x86_seh_recoverfp: {
17093 SDValue FnOp = Op.getOperand(1);
17094 SDValue IncomingFPOp = Op.getOperand(2);
17095 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
17096 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.x86.seh.recoverfp must take a function as the first argument");
17100 return recoverFramePointer(DAG, Fn, IncomingFPOp);
  }

  case Intrinsic::localaddress: {
17104 // Returns one of the stack, base, or frame pointer registers, depending on
17105 // which is used to reference local variables.
17106 MachineFunction &MF = DAG.getMachineFunction();
17107 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
    unsigned Reg;
    if (RegInfo->hasBasePointer(MF))
17110 Reg = RegInfo->getBaseRegister();
17111 else // This function handles the SP or FP case.
17112 Reg = RegInfo->getPtrSizedFrameRegister(MF);
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
  }
  }
}

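// The gather/scatter/prefetch helpers below build the machine nodes directly.
// Their memory operands follow the usual X86 addressing-mode tuple
// (Base, Scale, Index, Disp, Segment); Disp is always 0 and Segment is left
// empty (register 0) here, since these intrinsics only expose
// base + index * scale addressing. The mask is either an immediate (a
// constant mask) or a vXi1 register operand.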
17118 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17119 SDValue Src, SDValue Mask, SDValue Base,
17120 SDValue Index, SDValue ScaleOp, SDValue Chain,
17121 const X86Subtarget * Subtarget) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
17124 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
17125 MVT MaskVT = MVT::getVectorVT(MVT::i1,
17126 Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
  else {
17132 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17133 Mask.getSimpleValueType().getSizeInBits());
    // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
    // extracted with EXTRACT_SUBVECTOR.
    MaskInReg = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                            DAG.getBitcast(BitcastVT, Mask),
                            DAG.getIntPtrConstant(0, dl));
  }
17141 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17142 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
17143 SDValue Segment = DAG.getRegister(0, MVT::i32);
17144 if (Src.getOpcode() == ISD::UNDEF)
17145 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
17146 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17147 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17148 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17149 return DAG.getMergeValues(RetOps, dl);
17152 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17153 SDValue Src, SDValue Mask, SDValue Base,
17154 SDValue Index, SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
17157 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
17158 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
17159 SDValue Segment = DAG.getRegister(0, MVT::i32);
17160 MVT MaskVT = MVT::getVectorVT(MVT::i1,
17161 Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
  else {
17167 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17168 Mask.getSimpleValueType().getSizeInBits());
    // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
    // extracted with EXTRACT_SUBVECTOR.
    MaskInReg = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                            DAG.getBitcast(BitcastVT, Mask),
                            DAG.getIntPtrConstant(0, dl));
  }
17176 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17177 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17178 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17179 return SDValue(Res, 1);
17182 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17183 SDValue Mask, SDValue Base, SDValue Index,
17184 SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
17187 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
17188 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
17189 SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
  else
    MaskInReg = DAG.getBitcast(MaskVT, Mask);
17198 //SDVTList VTs = DAG.getVTList(MVT::Other);
17199 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17200 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17201 return SDValue(Res, 0);
17204 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17205 // read performance monitor counters (x86_rdpmc).
17206 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17207 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17208 SmallVectorImpl<SDValue> &Results) {
17209 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
17217 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17219 // Reads the content of a 64-bit performance counter and returns it in the
17220 // registers EDX:EAX.
17221 if (Subtarget->is64Bit()) {
17222 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
17232 if (Subtarget->is64Bit()) {
17233 // The EAX register is loaded with the low-order 32 bits. The EDX register
17234 // is loaded with the supported high-order bits of the counter.
17235 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17236 DAG.getConstant(32, DL, MVT::i8));
17237 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

17242 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17243 SDValue Ops[] = { LO, HI };
17244 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17245 Results.push_back(Pair);
  Results.push_back(Chain);
}

17249 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17250 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17251 // also used to custom lower READCYCLECOUNTER nodes.
17252 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17253 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17254 SmallVectorImpl<SDValue> &Results) {
17255 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;

17259 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17260 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17261 // and the EAX register is loaded with the low-order 32 bits.
17262 if (Subtarget->is64Bit()) {
17263 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  SDValue Chain = HI.getValue(1);
17273 if (Opcode == X86ISD::RDTSCP_DAG) {
17274 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17276 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17277 // the ECX register. Add 'ecx' explicitly to the chain.
    SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
                                     HI.getValue(2));
17280 // Explicitly store the content of ECX at the location passed in input
17281 // to the 'rdtscp' intrinsic.
17282 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
                         MachinePointerInfo(), false, false, 0);
  }

17286 if (Subtarget->is64Bit()) {
17287 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17288 // the EAX register is loaded with the low-order 32 bits.
17289 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17290 DAG.getConstant(32, DL, MVT::i8));
17291 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

17296 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17297 SDValue Ops[] = { LO, HI };
17298 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17299 Results.push_back(Pair);
  Results.push_back(Chain);
}

17303 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17304 SelectionDAG &DAG) {
  SmallVector<SDValue, 2> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}

17312 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
17313 MachineFunction &MF = DAG.getMachineFunction();
17314 SDValue Chain = Op.getOperand(0);
17315 SDValue RegNode = Op.getOperand(2);
17316 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");
17320 // Cast the operand to an alloca, and remember the frame index.
17321 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
17324 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
  // Return the chain operand without making any DAG nodes.
  return Chain;
}

/// \brief Lower intrinsics for the TRUNCATE_TO_MEM case: return a truncating
/// Store or MaskedStore node.
static SDValue LowerINTRINSIC_TRUNCATE_TO_MEM(const SDValue &Op,
                                              SelectionDAG &DAG,
                                              MVT ElementType) {
  SDLoc dl(Op);
17336 SDValue Mask = Op.getOperand(4);
17337 SDValue DataToTruncate = Op.getOperand(3);
17338 SDValue Addr = Op.getOperand(2);
17339 SDValue Chain = Op.getOperand(0);
17341 MVT VT = DataToTruncate.getSimpleValueType();
17342 MVT SVT = MVT::getVectorVT(ElementType, VT.getVectorNumElements());
17344 if (isAllOnesConstant(Mask)) // return just a truncate store
17345 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr,
17346 MachinePointerInfo(), SVT, false, false,
17347 SVT.getScalarSizeInBits()/8);
17349 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
17350 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17351 Mask.getSimpleValueType().getSizeInBits());
  // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
  // extracted with EXTRACT_SUBVECTOR.
17354 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17355 DAG.getBitcast(BitcastVT, Mask),
17356 DAG.getIntPtrConstant(0, dl));
17358 MachineMemOperand *MMO = DAG.getMachineFunction().
17359 getMachineMemOperand(MachinePointerInfo(),
17360 MachineMemOperand::MOStore, SVT.getStoreSize(),
17361 SVT.getScalarSizeInBits()/8);
17363 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr,
                            VMask, SVT, MMO, true);
}

17367 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17368 SelectionDAG &DAG) {
17369 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    if (IntNo == llvm::Intrinsic::x86_seh_ehregnode)
      return MarkEHRegistrationNode(Op, DAG);
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
17380 default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
17384 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17385 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17387 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17388 // Otherwise return the value from Rand, which is always 0, casted to i32.
17389 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17390 DAG.getConstant(1, dl, Op->getValueType(1)),
17391 DAG.getConstant(X86::COND_B, dl, MVT::i32),
17392 SDValue(Result.getNode(), 1) };
17393 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops);
17397 // Return { result, isValid, chain }.
17398 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17399 SDValue(Result.getNode(), 2));
  }
  case GATHER: {
    // gather(v1, mask, index, base, scale);
17403 SDValue Chain = Op.getOperand(0);
17404 SDValue Src = Op.getOperand(2);
17405 SDValue Base = Op.getOperand(3);
17406 SDValue Index = Op.getOperand(4);
17407 SDValue Mask = Op.getOperand(5);
17408 SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    // scatter(base, mask, index, v1, scale);
17414 SDValue Chain = Op.getOperand(0);
17415 SDValue Base = Op.getOperand(2);
17416 SDValue Mask = Op.getOperand(3);
17417 SDValue Index = Op.getOperand(4);
17418 SDValue Src = Op.getOperand(5);
17419 SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
17425 unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
17426 assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
17427 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17428 SDValue Chain = Op.getOperand(0);
17429 SDValue Mask = Op.getOperand(2);
17430 SDValue Index = Op.getOperand(3);
17431 SDValue Base = Op.getOperand(4);
17432 SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC: {
    SmallVector<SDValue, 2> Results;
17445 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17451 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17452 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_NE, dl, MVT::i8),
                                InTrans);
17455 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17456 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17457 Ret, SDValue(InTrans.getNode(), 1));
  }
  case ADX: {
    SmallVector<SDValue, 2> Results;
17462 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17463 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17464 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17465 DAG.getConstant(-1, dl, MVT::i8));
17466 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17467 Op.getOperand(4), GenCF.getValue(1));
17468 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
                                 Op.getOperand(5), MachinePointerInfo(),
                                 false, false, 0);
17471 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_B, dl, MVT::i8),
                                Res.getValue(1));
17474 Results.push_back(SetCC);
17475 Results.push_back(Store);
17476 return DAG.getMergeValues(Results, dl);
17478 case COMPRESS_TO_MEM: {
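    // Store the compressed vector: with an all-ones mask this degenerates to
    // a plain store; otherwise the data is compressed in registers first and
    // the combined value is then stored unmasked.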
17480 SDValue Mask = Op.getOperand(4);
17481 SDValue DataToCompress = Op.getOperand(3);
17482 SDValue Addr = Op.getOperand(2);
17483 SDValue Chain = Op.getOperand(0);
17485 MVT VT = DataToCompress.getSimpleValueType();
17486 if (isAllOnesConstant(Mask)) // return just a store
17487 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17488 MachinePointerInfo(), false, false,
17489 VT.getScalarSizeInBits()/8);
17491 SDValue Compressed =
17492 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress),
17493 Mask, DAG.getUNDEF(VT), Subtarget, DAG);
17494 return DAG.getStore(Chain, dl, Compressed, Addr,
17495 MachinePointerInfo(), false, false,
                        VT.getScalarSizeInBits()/8);
  }
17498 case TRUNCATE_TO_MEM_VI8:
17499 return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i8);
17500 case TRUNCATE_TO_MEM_VI16:
17501 return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i16);
17502 case TRUNCATE_TO_MEM_VI32:
17503 return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i32);
17504 case EXPAND_FROM_MEM: {
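    // Load a full vector and expand it through the mask in registers; with an
    // all-ones mask this is just an ordinary load.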
17506 SDValue Mask = Op.getOperand(4);
17507 SDValue PassThru = Op.getOperand(3);
17508 SDValue Addr = Op.getOperand(2);
17509 SDValue Chain = Op.getOperand(0);
17510 MVT VT = Op.getSimpleValueType();
17512 if (isAllOnesConstant(Mask)) // return just a load
17513 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
17514 false, VT.getScalarSizeInBits()/8);
17516 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
17517 false, false, false,
17518 VT.getScalarSizeInBits()/8);
17520 SDValue Results[] = {
17521 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
17522 Mask, PassThru, Subtarget, DAG), Chain};
    return DAG.getMergeValues(Results, dl);
  }
  }
}

17528 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
17529 SelectionDAG &DAG) const {
17530 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17531 MFI->setReturnAddressIsTaken(true);
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
17542 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17543 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
17544 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17545 DAG.getNode(ISD::ADD, dl, PtrVT,
17546 FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

17550 // Just load the return address.
17551 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
17552 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

17556 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
17557 MachineFunction &MF = DAG.getMachineFunction();
17558 MachineFrameInfo *MFI = MF.getFrameInfo();
17559 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17560 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17561 EVT VT = Op.getValueType();
17563 MFI->setFrameAddressIsTaken(true);
17565 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
17566 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind
    // codes simultaneously.
17569 int FrameAddrIndex = FuncInfo->getFAIndex();
17570 if (!FrameAddrIndex) {
17571 // Set up a frame object for the return address.
17572 unsigned SlotSize = RegInfo->getSlotSize();
17573 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
17574 SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

17580 unsigned FrameReg =
17581 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
17582 SDLoc dl(Op); // FIXME probably not meaningful
17583 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17584 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
17585 (FrameReg == X86::EBP && VT == MVT::i32)) &&
17586 "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

17595 // FIXME? Maybe this could be a TableGen attribute on some registers and
17596 // this table could be generated automatically from RegInfo.
17597 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
17598 SelectionDAG &DAG) const {
17599 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17600 const MachineFunction &MF = DAG.getMachineFunction();
17602 unsigned Reg = StringSwitch<unsigned>(RegName)
17603 .Case("esp", X86::ESP)
17604 .Case("rsp", X86::RSP)
17605 .Case("ebp", X86::EBP)
                       .Case("rbp", X86::RBP)
                       .Default(0);

17609 if (Reg == X86::EBP || Reg == X86::RBP) {
17610 if (!TFI.hasFP(MF))
17611 report_fatal_error("register " + StringRef(RegName) +
17612 " is allocatable: function has no frame pointer");
17615 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17616 unsigned FrameReg =
17617 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
17618 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
17619 "Invalid Frame Register!");
17627 report_fatal_error("Invalid register name global variable");
17630 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
17631 SelectionDAG &DAG) const {
17632 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}

17636 unsigned X86TargetLowering::getExceptionPointerRegister(
17637 const Constant *PersonalityFn) const {
17638 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
17639 return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
  return Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
}

17644 unsigned X86TargetLowering::getExceptionSelectorRegister(
17645 const Constant *PersonalityFn) const {
17646 // Funclet personalities don't use selectors (the runtime does the selection).
17647 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
  return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
}

17651 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
17652 SDValue Chain = Op.getOperand(0);
17653 SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

17657 EVT PtrVT = getPointerTy(DAG.getDataLayout());
17658 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17659 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
17660 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
17661 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
17662 "Invalid Frame Register!");
17663 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
17664 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
17666 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
                                                        dl));
17669 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
17672 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
17674 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
17675 DAG.getRegister(StoreAddrReg, PtrVT));
17678 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
17679 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
17682 DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

17686 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
17687 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

17693 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

17697 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
17698 SelectionDAG &DAG) const {
17699 SDValue Root = Op.getOperand(0);
17700 SDValue Trmp = Op.getOperand(1); // trampoline
17701 SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

17705 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17706 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
17708 if (Subtarget->is64Bit()) {
17709 SDValue OutChains[6];
17711 // Large code-model.
17712 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
17713 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
17715 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
17716 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
17718 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
17720 // Load the pointer to the nested function into R11.
17721 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
17722 SDValue Addr = Trmp;
17723 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);
17727 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17728 DAG.getConstant(2, dl, MVT::i64));
17729 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);
17733 // Load the 'nest' parameter value into R10.
17734 // R10 is specified in X86CallingConv.td
17735 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
17736 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17737 DAG.getConstant(10, dl, MVT::i64));
17738 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);
17742 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17743 DAG.getConstant(12, dl, MVT::i64));
17744 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);
17748 // Jump to the nested function.
17749 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
17750 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17751 DAG.getConstant(20, dl, MVT::i64));
17752 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

17756 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
17757 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17758 DAG.getConstant(22, dl, MVT::i64));
17759 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
17765 const Function *Func =
17766 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
17767 CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
17773 case CallingConv::C:
17774 case CallingConv::X86_StdCall: {
17775 // Pass 'nest' parameter in ECX.
17776 // Must be kept in sync with X86CallingConv.td
17777 NestReg = X86::ECX;
17779 // Check that ECX wasn't needed by an 'inreg' parameter.
17780 FunctionType *FTy = Func->getFunctionType();
17781 const AttributeSet &Attrs = Func->getAttributes();
17783 if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

17787 for (FunctionType::param_iterator I = FTy->param_begin(),
17788 E = FTy->param_end(); I != E; ++I, ++Idx)
17789 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
17790 auto &DL = DAG.getDataLayout();
17791 // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
          }

17795 if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
17802 case CallingConv::X86_FastCall:
17803 case CallingConv::X86_ThisCall:
17804 case CallingConv::Fast:
17805 // Pass 'nest' parameter in EAX.
17806 // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

17811 SDValue OutChains[4];
17812 SDValue Addr, Disp;
17814 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17815 DAG.getConstant(10, dl, MVT::i32));
17816 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
17818 // This is storing the opcode for MOV32ri.
17819 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
17820 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
17821 OutChains[0] = DAG.getStore(Root, dl,
17822 DAG.getConstant(MOV32ri|N86Reg, dl, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

17826 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17827 DAG.getConstant(1, dl, MVT::i32));
17828 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

17832 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
17833 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17834 DAG.getConstant(5, dl, MVT::i32));
17835 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

17839 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17840 DAG.getConstant(6, dl, MVT::i32));
17841 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}

17849 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
17850 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
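  // Worked check of the formula above: FPSR bits 11:10 = 00 (nearest) gives
  // ((0 | 0) + 1) & 3 = 1; 01 (-inf) gives (2 + 1) & 3 = 3; 10 (+inf) gives
  // (1 + 1) & 3 = 2; and 11 (to zero) gives ((1 | 2) + 1) & 3 = 0, matching
  // the FLT_ROUNDS encoding listed above.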
17870 MachineFunction &MF = DAG.getMachineFunction();
17871 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17872 unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

17876 // Save FP Control Word to stack slot
17877 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
17878 SDValue StackSlot =
17879 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
17881 MachineMemOperand *MMO =
17882 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
17883 MachineMemOperand::MOStore, 2, 2);
17885 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
17886 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
17887 DAG.getVTList(MVT::Other),
17888 Ops, MVT::i16, MMO);
17890 // Load FP Control Word from stack slot
17891 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
17892 MachinePointerInfo(), false, false, false, 0);
17894 // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
17897 DAG.getNode(ISD::AND, DL, MVT::i16,
17898 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
17899 DAG.getConstant(11, DL, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
17902 DAG.getNode(ISD::AND, DL, MVT::i16,
17903 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
17904 DAG.getConstant(9, DL, MVT::i8));
  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
17908 DAG.getNode(ISD::ADD, DL, MVT::i16,
17909 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
17910 DAG.getConstant(1, DL, MVT::i16)),
17911 DAG.getConstant(3, DL, MVT::i16));
17913 return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}

/// \brief Lower a vector CTLZ using native supported vector CTLZ instructions.
//
// 1. i32/i64 128/256-bit vectors (native support requires VLX) are widened
//    to 512-bit vectors.
// 2. i8/i16 vectors are implemented using the dword LZCNT vector instruction
//    ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
//    split the vector, perform the operation on its Lo and Hi parts and
//    concatenate the results.
17925 static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
17928 MVT EltVT = VT.getVectorElementType();
17929 unsigned NumElems = VT.getVectorNumElements();
17931 if (EltVT == MVT::i64 || EltVT == MVT::i32) {
17932 // Extend to 512 bit vector.
17933 assert((VT.is256BitVector() || VT.is128BitVector()) &&
17934 "Unsupported value type for operation");
17936 MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
17937 SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
                                 DAG.getUNDEF(NewVT),
                                 Op.getOperand(0),
                                 DAG.getIntPtrConstant(0, dl));
17941 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
17943 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
                       DAG.getIntPtrConstant(0, dl));
  }

17947 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
17948 "Unsupported element type");
17950 if (16 < NumElems) {
    // Split the vector; its Lo and Hi parts will be handled in the next
    // iteration.
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
17954 MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
17956 Lo = DAG.getNode(Op.getOpcode(), dl, OutVT, Lo);
17957 Hi = DAG.getNode(Op.getOpcode(), dl, OutVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
  }

17962 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
17964 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
17965 "Unsupported value type for operation");
17967 // Use native supported vector instruction vplzcntd.
17968 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
17969 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
17970 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
17971 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}

17976 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget *Subtarget,
17977 SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  if (VT.isVector() && Subtarget->hasAVX512())
17984 return LowerVectorCTLZ_AVX512(Op, DAG);
17986 Op = Op.getOperand(0);
17987 if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

17993 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
17994 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
17995 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
    DAG.getConstant(X86::COND_E, dl, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18006 // Finally xor with NumBits-1.
18007 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
18008 DAG.getConstant(NumBits - 1, dl, OpVT));
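  // Note: BSR returns the bit index of the highest set bit, a value in
  // [0, NumBits-1], so XOR'ing it with NumBits-1 equals NumBits-1 - index,
  // which is exactly the count of leading zeros.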
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

18015 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
18016 SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

18022 Op = Op.getOperand(0);
18023 if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

18029 // Issue a bsr (scan bits in reverse).
18030 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18031 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18033 // And xor with NumBits-1.
18034 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
18035 DAG.getConstant(NumBits - 1, dl, OpVT));
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

18042 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18043 MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getScalarSizeInBits();
  SDLoc dl(Op);

18047 if (VT.isVector()) {
18048 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18050 SDValue N0 = Op.getOperand(0);
18051 SDValue Zero = DAG.getConstant(0, dl, VT);
18053 // lsb(x) = (x & -x)
18054 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0,
18055 DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
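    // For example, x = 0b0110100 gives -x = ...1001100 in two's complement,
    // so x & -x = 0b0000100: only the lowest set bit survives.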
18057 // cttz_undef(x) = (width - 1) - ctlz(lsb)
18058 if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
18059 TLI.isOperationLegal(ISD::CTLZ, VT)) {
18060 SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
18061 return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
                         DAG.getNode(ISD::CTLZ, dl, VT, LSB));
    }

18065 // cttz(x) = ctpop(lsb - 1)
18066 SDValue One = DAG.getConstant(1, dl, VT);
18067 return DAG.getNode(ISD::CTPOP, dl, VT,
                       DAG.getNode(ISD::SUB, dl, VT, LSB, One));
  }

18071 assert(Op.getOpcode() == ISD::CTTZ &&
18072 "Only scalar CTTZ requires custom lowering");
18074 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18075 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18076 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op.getOperand(0));
  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, dl, VT),
    DAG.getConstant(X86::COND_E, dl, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}

18088 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18089 // ones, and then concatenate the result back.
18090 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18091 MVT VT = Op.getSimpleValueType();
18093 assert(VT.is256BitVector() && VT.isInteger() &&
18094 "Unsupported value type for operation");
  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

18099 // Extract the LHS vectors
18100 SDValue LHS = Op.getOperand(0);
18101 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18102 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18104 // Extract the RHS vectors
18105 SDValue RHS = Op.getOperand(1);
18106 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18107 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18109 MVT EltVT = VT.getVectorElementType();
18110 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18112 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18113 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

18117 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18118 if (Op.getValueType() == MVT::i1)
18119 return DAG.getNode(ISD::XOR, SDLoc(Op), Op.getValueType(),
18120 Op.getOperand(0), Op.getOperand(1));
18121 assert(Op.getSimpleValueType().is256BitVector() &&
18122 Op.getSimpleValueType().isInteger() &&
18123 "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

18127 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18128 if (Op.getValueType() == MVT::i1)
18129 return DAG.getNode(ISD::XOR, SDLoc(Op), Op.getValueType(),
18130 Op.getOperand(0), Op.getOperand(1));
18131 assert(Op.getSimpleValueType().is256BitVector() &&
18132 Op.getSimpleValueType().isInteger() &&
18133 "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

18137 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
18138 assert(Op.getSimpleValueType().is256BitVector() &&
18139 Op.getSimpleValueType().isInteger() &&
18140 "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

18144 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18145 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  if (VT == MVT::i1)
    return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
18152 // Decompose 256-bit ops into smaller 128-bit ops.
18153 if (VT.is256BitVector() && !Subtarget->hasInt256())
18154 return Lower256IntArith(Op, DAG);
18156 SDValue A = Op.getOperand(0);
18157 SDValue B = Op.getOperand(1);
18159 // Lower v16i8/v32i8 mul as promotion to v8i16/v16i16 vector
18160 // pairs, multiply and truncate.
18161 if (VT == MVT::v16i8 || VT == MVT::v32i8) {
18162 if (Subtarget->hasInt256()) {
18163 if (VT == MVT::v32i8) {
18164 MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2);
18165 SDValue Lo = DAG.getIntPtrConstant(0, dl);
18166 SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
18167 SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo);
18168 SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo);
18169 SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi);
18170 SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Hi);
18171 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18172 DAG.getNode(ISD::MUL, dl, SubVT, ALo, BLo),
                           DAG.getNode(ISD::MUL, dl, SubVT, AHi, BHi));
      }

18176 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
18177 return DAG.getNode(
18178 ISD::TRUNCATE, dl, VT,
18179 DAG.getNode(ISD::MUL, dl, ExVT,
18180 DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, B)));
    }

18184 assert(VT == MVT::v16i8 &&
18185 "Pre-AVX2 support only supports v16i8 multiplication");
18186 MVT ExVT = MVT::v8i16;
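    // On pre-SSE4.1 targets the interleave/SRA sequences below sign-extend
    // bytes into 16-bit lanes: shuffling a byte into the high half of a word
    // and arithmetically shifting right by 8 reproduces the byte with its
    // sign in the full word.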
18188 // Extract the lo parts and sign extend to i16
    SDValue ALo, BLo;
    if (Subtarget->hasSSE41()) {
18191 ALo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, A);
18192 BLo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, B);
    } else {
      const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
18195 -1, 4, -1, 5, -1, 6, -1, 7};
18196 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
18197 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
18198 ALo = DAG.getBitcast(ExVT, ALo);
18199 BLo = DAG.getBitcast(ExVT, BLo);
18200 ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
      BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
    }

    // Extract the hi parts and sign extend to i16.
    SDValue AHi, BHi;
    if (Subtarget->hasSSE41()) {
18207 const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
18208 -1, -1, -1, -1, -1, -1, -1, -1};
18209 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
18210 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
18211 AHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, AHi);
18212 BHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, BHi);
    } else {
      const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
18215 -1, 12, -1, 13, -1, 14, -1, 15};
18216 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
18217 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
18218 AHi = DAG.getBitcast(ExVT, AHi);
18219 BHi = DAG.getBitcast(ExVT, BHi);
18220 AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
      BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
    }

18224 // Multiply, mask the lower 8bits of the lo/hi results and pack
18225 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
18226 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
18227 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
18228 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
  }

18232 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18233 if (VT == MVT::v4i32) {
18234 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18235 "Should not custom lower when pmuldq is available!");
18237 // Extract the odd parts.
18238 static const int UnpackMask[] = { 1, -1, 3, -1 };
18239 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18240 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18242 // Multiply the even parts.
18243 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18244 // Now multiply odd parts.
18245 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18247 Evens = DAG.getBitcast(VT, Evens);
18248 Odds = DAG.getBitcast(VT, Odds);
    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
18252 static const int ShufMask[] = { 0, 4, 2, 6 };
18253 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18256 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18257 "Only know how to lower V2I64/V4I64/V8I64 multiply");
  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  AloBhi = psllqi(AloBhi, 32);
  //  AhiBlo = psllqi(AhiBlo, 32);
  //  return AloBlo + AloBhi + AhiBlo;
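  //
  // A minimal scalar sketch of the same decomposition (illustration only;
  // everything is implicitly reduced mod 2^64):
  //
  //   uint64_t mul64(uint64_t a, uint64_t b) {
  //     uint64_t Lo = 0xffffffffULL;
  //     uint64_t AloBlo = (a & Lo) * (b & Lo);
  //     uint64_t AloBhi = (a & Lo) * (b >> 32);
  //     uint64_t AhiBlo = (a >> 32) * (b & Lo);
  //     return AloBlo + (AloBhi << 32) + (AhiBlo << 32);
  //   }
  //
  // The Ahi * Bhi term is dropped entirely: shifted by 64 bits it cannot
  // affect the low 64 bits of the product.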
18270 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18271 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18273 SDValue AhiBlo = Ahi;
18274 SDValue AloBhi = Bhi;
18275 // Bit cast to 32-bit vectors for MULUDQ
18276 MVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18277 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18278 A = DAG.getBitcast(MulVT, A);
18279 B = DAG.getBitcast(MulVT, B);
18280 Ahi = DAG.getBitcast(MulVT, Ahi);
18281 Bhi = DAG.getBitcast(MulVT, Bhi);
  SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
  // After shifting right by constant values the result may be all-zero.
  if (!ISD::isBuildVectorAllZeros(Ahi.getNode())) {
    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
    AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
  }
  if (!ISD::isBuildVectorAllZeros(Bhi.getNode())) {
    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
    AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
  }

  SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
  return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
18298 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18299 assert(Subtarget->isTargetWin64() && "Unexpected target");
18300 EVT VT = Op.getValueType();
18301 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18302 "Unexpected return type for lowering");
18306 switch (Op->getOpcode()) {
18307 default: llvm_unreachable("Unexpected request for libcall!");
18308 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18309 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18310 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18311 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18312 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();
18319 TargetLowering::ArgListTy Args;
18320 TargetLowering::ArgListEntry Entry;
18321 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18322 EVT ArgVT = Op->getOperand(i).getValueType();
18323 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18324 "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), false, false, 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy, 0);
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }
18336 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18337 getPointerTy(DAG.getDataLayout()));
18339 TargetLowering::CallLoweringInfo CLI(DAG);
18340 CLI.setDebugLoc(dl).setChain(InChain)
18341 .setCallee(getLibcallCallingConv(LC),
18342 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18343 Callee, std::move(Args), 0)
18344 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}
static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  MVT VT = Op0.getSimpleValueType();
  SDLoc dl(Op);

  assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
         (VT == MVT::v8i32 && Subtarget->hasInt256()));
  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  //       => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
18369 // Place the odd value at an even position (basically, shift all values 1
18370 // step to the left):
18371 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18372 // <a|b|c|d> => <b|undef|d|undef>
18373 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18374 // <e|f|g|h> => <f|undef|h|undef>
18375 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
  MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
  bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opcode =
      (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18383 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18384 // => <2 x i64> <ae|cg>
18385 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18386 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18387 // => <2 x i64> <bf|dh>
18388 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
  // Shuffle it back into the right order.
  SDValue Highs, Lows;
  if (VT == MVT::v8i32) {
    const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  } else {
    const int HighMask[] = {1, 5, 3, 7};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 4, 2, 6};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  }
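  // For a signed product, the unsigned high half computed above needs the
  // correction high_s = high_u - (Op0 < 0 ? Op1 : 0) - (Op1 < 0 ? Op0 : 0),
  // which follows from a_signed = a_unsigned - 2^32 * signbit(a); the
  // SRA-by-31 masks below compute exactly those two conditional terms.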
  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
  if (IsSigned && !Subtarget->hasSSE41()) {
    SDValue ShAmt = DAG.getConstant(
        31, dl,
        DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
    SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
    SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);

    SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
    Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
  }

  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
  SDValue Ops[] = {Lows, Highs};
  return DAG.getMergeValues(Ops, dl);
}
18425 // Return true if the required (according to Opcode) shift-imm form is natively
18426 // supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
                                        unsigned Opcode) {
  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget->hasBWI()))
    return true;
18436 bool LShift = VT.is128BitVector() ||
18437 (VT.is256BitVector() && Subtarget->hasInt256());
18439 bool AShift = LShift && (Subtarget->hasVLX() ||
18440 (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget *Subtarget,
                                      unsigned Opcode) {
  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
18452 // Return true if the required (according to Opcode) variable-shift form is
18453 // natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
                                    unsigned Opcode) {

  if (!Subtarget->hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 supported only on AVX-512, BWI.
  if (VT.getScalarSizeInBits() == 16 && !Subtarget->hasBWI())
    return false;

  if (VT.is512BitVector() || Subtarget->hasVLX())
    return true;
18467 bool LShift = VT.is128BitVector() || VT.is256BitVector();
18468 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
18479 unsigned X86Opc = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
18480 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
18482 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
18483 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
18484 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
18485 SDValue Ex = DAG.getBitcast(ExVT, R);
    if (ShiftAmt >= 32) {
      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt - 32, DAG);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
      Lower = DAG.getBitcast(ExVT, Lower);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };
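  // Example for the ShiftAmt >= 32 branch with a v2i64 SRA by 33: each i64
  // lane is viewed as an <lo, hi> i32 pair; the new high word is hi >> 31
  // (a pure sign splat) and the new low word is hi >> (33 - 32), which the
  // {5, 1, 7, 3} shuffle above selects from the two partial results.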
18514 // Optimize shl/srl/sra with constant shift amount.
18515 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18516 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18517 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18519 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
18520 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
18522 // i64 SRA needs to be performed as partial shifts.
18523 if ((VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18524 Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
18525 return ArithmeticShiftRight64(ShiftAmt);
18527 if (VT == MVT::v16i8 ||
18528 (Subtarget->hasInt256() && VT == MVT::v32i8) ||
18529 VT == MVT::v64i8) {
18530 unsigned NumElts = VT.getVectorNumElements();
18531 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
18533 // Simple i8 add case
18534 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
18535 return DAG.getNode(ISD::ADD, dl, VT, R, R);
18537 // ashr(R, 7) === cmp_slt(R, 0)
18538 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
18539 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
          return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
        }

        // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
        if (VT == MVT::v16i8 && Subtarget->hasXOP())
          return SDValue();
18547 if (Op.getOpcode() == ISD::SHL) {
18548 // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
                                                   R, ShiftAmt, DAG);
          SHL = DAG.getBitcast(VT, SHL);
          // Zero out the rightmost bits.
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
        }
18556 if (Op.getOpcode() == ISD::SRL) {
18557 // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT,
                                                   R, ShiftAmt, DAG);
          SRL = DAG.getBitcast(VT, SRL);
          // Zero out the leftmost bits.
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
        }
18565 if (Op.getOpcode() == ISD::SRA) {
18566 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
18567 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18569 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
18570 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }
    }
  }
18579 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18580 if (!Subtarget->is64Bit() && !Subtarget->hasXOP() &&
18581 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64))) {
18583 // Peek through any splat that was introduced for i64 shift vectorization.
18584 int SplatIndex = -1;
18585 if (ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt.getNode()))
18586 if (SVN->isSplat()) {
18587 SplatIndex = SVN->getSplatIndex();
18588 Amt = Amt.getOperand(0);
18589 assert(SplatIndex < (int)VT.getVectorNumElements() &&
18590 "Splat shuffle referencing second operand");
      }

    if (Amt.getOpcode() != ISD::BITCAST ||
        Amt.getOperand(0).getOpcode() != ISD::BUILD_VECTOR)
      return SDValue();

    Amt = Amt.getOperand(0);
18598 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18599 VT.getVectorNumElements();
18600 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18601 uint64_t ShiftAmt = 0;
18602 unsigned BaseOp = (SplatIndex < 0 ? 0 : SplatIndex * Ratio);
18603 for (unsigned i = 0; i != Ratio; ++i) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + BaseOp));
      if (!C)
        return SDValue();
      // 6 == Log2(64)
      ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
    }
18611 // Check remaining shift amounts (if not a splat).
18612 if (SplatIndex < 0) {
18613 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18614 uint64_t ShAmt = 0;
18615 for (unsigned j = 0; j != Ratio; ++j) {
          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
          if (!C)
            return SDValue();
          // 6 == Log2(64)
          ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
        }
        if (ShAmt != ShiftAmt)
          return SDValue();
      }
    }
18627 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
18628 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
    if (Op.getOpcode() == ISD::SRA)
      return ArithmeticShiftRight64(ShiftAmt);
  }

  return SDValue();
}
18637 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18638 const X86Subtarget* Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
18641 SDValue R = Op.getOperand(0);
18642 SDValue Amt = Op.getOperand(1);
18644 unsigned X86OpcI = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
18645 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
18647 unsigned X86OpcV = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHL :
18648 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
  if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) {
    SDValue BaseShAmt;
    MVT EltVT = VT.getVectorElementType();
18654 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18655 // Check if this build_vector node is doing a splat.
18656 // If so, then set BaseShAmt equal to the splat value.
18657 BaseShAmt = BV->getSplatValue();
18658 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
        BaseShAmt = SDValue();
    } else {
      if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
        Amt = Amt.getOperand(0);
18664 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18665 if (SVN && SVN->isSplat()) {
18666 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18667 SDValue InVec = Amt.getOperand(0);
18668 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18669 assert((SplatIdx < InVec.getSimpleValueType().getVectorNumElements()) &&
18670 "Unexpected shuffle index found!");
18671 BaseShAmt = InVec.getOperand(SplatIdx);
18672 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18673 if (ConstantSDNode *C =
18674 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18675 if (C->getZExtValue() == SplatIdx)
              BaseShAmt = InVec.getOperand(1);
          }
        }

        if (!BaseShAmt)
          // Avoid introducing an extract element from a shuffle.
          BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
                                  DAG.getIntPtrConstant(SplatIdx, dl));
      }
    }
18687 if (BaseShAmt.getNode()) {
18688 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18689 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18690 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18691 else if (EltVT.bitsLT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, DAG);
    }
  }
18698 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18699 if (!Subtarget->is64Bit() && VT == MVT::v2i64 &&
18700 Amt.getOpcode() == ISD::BITCAST &&
18701 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18702 Amt = Amt.getOperand(0);
18703 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18704 VT.getVectorNumElements();
18705 std::vector<SDValue> Vals(Ratio);
18706 for (unsigned i = 0; i != Ratio; ++i)
18707 Vals[i] = Amt.getOperand(i);
18708 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18709 for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }

    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
      return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
  }

  return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");

  if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
    return V;

  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
    return V;

  if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode()))
    return Op;
18739 // XOP has 128-bit variable logical/arithmetic shifts.
18740 // +ve/-ve Amt = shift left/right.
18741 if (Subtarget->hasXOP() &&
18742 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18743 VT == MVT::v8i16 || VT == MVT::v16i8)) {
    if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
      SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
    if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
    if (Op.getOpcode() == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }
18754 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
18755 // shifts per-lane and then shuffle the partial results back together.
18756 if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) {
18757 // Splat the shift amounts so the scalar shifts above will catch it.
18758 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
18759 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
18760 SDValue R0 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt0);
18761 SDValue R1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt1);
    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
  }
18765 // i64 vector arithmetic shift can be emulated with the transform:
18766 // M = lshr(SIGN_BIT, Amt)
18767 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
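  // Worked 8-bit instance of the identity: for x = 0xF0 (-16) and Amt = 2,
  // M = 0x80 >> 2 = 0x20; lshr gives 0x3C, xor gives 0x1C, and subtracting
  // M gives 0xFC (-4), matching ashr(-16, 2).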
  if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget->hasInt256())) &&
      Op.getOpcode() == ISD::SRA) {
    SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT);
    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
    return R;
  }
18778 // If possible, lower this packed shift into a vector multiply instead of
18779 // expanding it into a sequence of scalar shifts.
18780 // Do this only if the vector shift count is a constant build_vector.
18781 if (Op.getOpcode() == ISD::SHL &&
18782 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
18783 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
18784 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18785 SmallVector<SDValue, 8> Elts;
18786 MVT SVT = VT.getVectorElementType();
18787 unsigned SVTBits = SVT.getSizeInBits();
18788 APInt One(SVTBits, 1);
18789 unsigned NumElems = VT.getVectorNumElements();
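    // Each non-undef lane of the multiplier built below becomes 2^amt, so
    // the final MUL computes R << amt lane-wise; e.g. constant amounts
    // <1, 2, 3, 4> produce the multiplier vector <2, 4, 8, 16>.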
    for (unsigned i = 0; i != NumElems; ++i) {
      SDValue Op = Amt->getOperand(i);
      if (Op->getOpcode() == ISD::UNDEF) {
        Elts.push_back(Op);
        continue;
      }

      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
      APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
      uint64_t ShAmt = C.getZExtValue();
      if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
    }

    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
    return DAG.getNode(ISD::MUL, dl, VT, R, BV);
  }
  // Lower SHL with variable shift amount.
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));

    Op = DAG.getNode(ISD::ADD, dl, VT, Op,
                     DAG.getConstant(0x3f800000U, dl, VT));
    Op = DAG.getBitcast(MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
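  // The block above builds the float 2^amt in each lane: the amount is
  // shifted into the exponent field (bit 23) and added to 0x3f800000, the
  // bit pattern of 1.0f. E.g. amt = 3 yields exponent 127 + 3, i.e. 8.0f,
  // and after FP_TO_SINT the MUL computes R * 8 == R << 3.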
  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a MOVSS/MOVSD instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
  // the vector shift into four scalar shifts plus four pairs of vector
  // insert/extract.
18834 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
18835 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18836 unsigned TargetOpcode = X86ISD::MOVSS;
18837 bool CanBeSimplified;
18838 // The splat value for the first packed shift (the 'X' from the example).
18839 SDValue Amt1 = Amt->getOperand(0);
18840 // The splat value for the second packed shift (the 'Y' from the example).
18841 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
18842 Amt->getOperand(2);
18844 // See if it is possible to replace this node with a sequence of
18845 // two shifts followed by a MOVSS/MOVSD
18846 if (VT == MVT::v4i32) {
18847 // Check if it is legal to use a MOVSS.
18848 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
18849 Amt2 == Amt->getOperand(3);
18850 if (!CanBeSimplified) {
18851 // Otherwise, check if we can still simplify this node using a MOVSD.
18852 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
18853 Amt->getOperand(2) == Amt->getOperand(3);
18854 TargetOpcode = X86ISD::MOVSD;
        Amt2 = Amt->getOperand(2);
      }
    } else {
      // Do similar checks for the case where the machine value type
      // is MVT::v8i16.
      CanBeSimplified = Amt1 == Amt->getOperand(1);
18861 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
18862 CanBeSimplified = Amt2 == Amt->getOperand(i);
18864 if (!CanBeSimplified) {
18865 TargetOpcode = X86ISD::MOVSD;
18866 CanBeSimplified = true;
18867 Amt2 = Amt->getOperand(4);
18868 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
18869 CanBeSimplified = Amt1 == Amt->getOperand(i);
18870 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
          CanBeSimplified = Amt2 == Amt->getOperand(j);
      }
    }
18875 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
18876 isa<ConstantSDNode>(Amt2)) {
18877 // Replace this node with two shifts followed by a MOVSS/MOVSD.
      MVT CastVT = MVT::v4i32;
      SDValue Splat1 =
          DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT);
      SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
      SDValue Splat2 =
          DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT);
      SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
18885 if (TargetOpcode == X86ISD::MOVSD)
18886 CastVT = MVT::v2i64;
18887 SDValue BitCast1 = DAG.getBitcast(CastVT, Shift1);
18888 SDValue BitCast2 = DAG.getBitcast(CastVT, Shift2);
      SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
                                            BitCast1, DAG);
      return DAG.getBitcast(VT, Result);
    }
  }
18895 // v4i32 Non Uniform Shifts.
18896 // If the shift amount is constant we can shift each lane using the SSE2
18897 // immediate shifts, else we need to zero-extend each lane to the lower i64
18898 // and shift using the SSE2 variable shifts.
18899 // The separate results can then be blended together.
18900 if (VT == MVT::v4i32) {
18901 unsigned Opc = Op.getOpcode();
18902 SDValue Amt0, Amt1, Amt2, Amt3;
18903 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18904 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
18905 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
18906 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
    } else {
      // ISD::SHL is handled above but we include it here for completeness.
      switch (Opc) {
      default:
        llvm_unreachable("Unknown target vector shift node");
      case ISD::SHL:
        Opc = X86ISD::VSHL;
        break;
      case ISD::SRL:
        Opc = X86ISD::VSRL;
        break;
      case ISD::SRA:
        Opc = X86ISD::VSRA;
        break;
      }

      // The SSE2 shifts use the lower i64 as the same shift amount for
      // all lanes and the upper i64 is ignored. These shuffle masks
      // optimally zero-extend each lane on SSE2/SSE41/AVX targets.
      SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
    }

    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
    SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2);
    SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3);
    SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
    SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
    return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
  }
18942 if (VT == MVT::v16i8 ||
18943 (VT == MVT::v32i8 && Subtarget->hasInt256() && !Subtarget->hasXOP())) {
18944 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
18945 unsigned ShiftOpcode = Op->getOpcode();
18947 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
18948 // On SSE41 targets we make use of the fact that VSELECT lowers
18949 // to PBLENDVB which selects bytes based just on the sign bit.
18950 if (Subtarget->hasSSE41()) {
18951 V0 = DAG.getBitcast(VT, V0);
18952 V1 = DAG.getBitcast(VT, V1);
18953 Sel = DAG.getBitcast(VT, Sel);
        return DAG.getBitcast(SelVT,
                              DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we test for the sign bit by comparing to
      // zero - a negative value will set all bits of the lanes to true
      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
      return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1);
    };
18965 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
18966 // We can safely do this using i16 shifts as we're only interested in
18967 // the 3 lower bits of each byte.
18968 Amt = DAG.getBitcast(ExtVT, Amt);
18969 Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
18970 Amt = DAG.getBitcast(VT, Amt);
18972 if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
18973 // r = VSELECT(r, shift(r, 4), a);
      SDValue M =
          DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
18976 R = SignBitSelect(VT, Amt, M, R);
18979 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
18981 // r = VSELECT(r, shift(r, 2), a);
18982 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
18983 R = SignBitSelect(VT, Amt, M, R);
18986 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
18988 // return VSELECT(r, shift(r, 1), a);
18989 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);
      return R;
    }
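    // In the ladder above (and the SRA ladder below) the amount was
    // pre-shifted left by 5, so bit 7 of each byte is amount bit 2: the
    // first select applies the shift-by-4 step exactly where it is wanted,
    // and each a += a promotes the next lower amount bit into the sign
    // position for the 2- and 1-bit rounds.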
    if (Op->getOpcode() == ISD::SRA) {
      // For SRA we need to unpack each byte to the higher byte of an i16
      // vector so we can correctly sign extend. We don't care what happens
      // to the lower byte.
      SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
18999 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
19000 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
19001 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
19002 ALo = DAG.getBitcast(ExtVT, ALo);
19003 AHi = DAG.getBitcast(ExtVT, AHi);
19004 RLo = DAG.getBitcast(ExtVT, RLo);
19005 RHi = DAG.getBitcast(ExtVT, RHi);
19007 // r = VSELECT(r, shift(r, 4), a);
19008 SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
19009 DAG.getConstant(4, dl, ExtVT));
19010 SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
19011 DAG.getConstant(4, dl, ExtVT));
19012 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
19013 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
19016 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
19017 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
19019 // r = VSELECT(r, shift(r, 2), a);
19020 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
19021 DAG.getConstant(2, dl, ExtVT));
19022 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
19023 DAG.getConstant(2, dl, ExtVT));
19024 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
19025 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
19028 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
19029 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
19031 // r = VSELECT(r, shift(r, 1), a);
19032 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
19033 DAG.getConstant(1, dl, ExtVT));
19034 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
19035 DAG.getConstant(1, dl, ExtVT));
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // Logical shift the result back to the lower byte, leaving a zero
      // upper byte, meaning that we can safely pack with PACKUSWB.
      RLo =
          DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
      RHi =
          DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
    }
  }
  // It's worth extending once and using the v8i32 shifts for 16-bit types,
  // but the extra overheads to get from v16i8 to v8i32 make the existing SSE
  // solution better.
  if (Subtarget->hasInt256() && VT == MVT::v8i16) {
    MVT ExtVT = MVT::v8i32;
    unsigned ExtOpc =
        Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
    Amt = DAG.getNode(ISD::ANY_EXTEND, dl, ExtVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
  }
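  // In the v16i16 block below, unpacking R against a zero vector places
  // each i16 element in the high half of an i32 lane; after shifting at
  // i32 granularity, an SRL by 16 leaves the exact 16-bit result with the
  // correct zero or sign fill, and PACKUS reassembles the vector.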
  if (Subtarget->hasInt256() && !Subtarget->hasXOP() && VT == MVT::v16i16) {
    MVT ExtVT = MVT::v8i32;
    SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
    SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
    SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
    SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
    ALo = DAG.getBitcast(ExtVT, ALo);
    AHi = DAG.getBitcast(ExtVT, AHi);
    RLo = DAG.getBitcast(ExtVT, RLo);
    RHi = DAG.getBitcast(ExtVT, RHi);
    SDValue Lo = DAG.getNode(Op.getOpcode(), dl, ExtVT, RLo, ALo);
    SDValue Hi = DAG.getNode(Op.getOpcode(), dl, ExtVT, RHi, AHi);
    Lo = DAG.getNode(ISD::SRL, dl, ExtVT, Lo, DAG.getConstant(16, dl, ExtVT));
    Hi = DAG.getNode(ISD::SRL, dl, ExtVT, Hi, DAG.getConstant(16, dl, ExtVT));
    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
  }
19081 if (VT == MVT::v8i16) {
19082 unsigned ShiftOpcode = Op->getOpcode();
19084 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
19085 // On SSE41 targets we make use of the fact that VSELECT lowers
19086 // to PBLENDVB which selects bytes based just on the sign bit.
19087 if (Subtarget->hasSSE41()) {
19088 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
19089 V0 = DAG.getBitcast(ExtVT, V0);
19090 V1 = DAG.getBitcast(ExtVT, V1);
      Sel = DAG.getBitcast(ExtVT, Sel);
      return DAG.getBitcast(
          VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1));
    }
    // On pre-SSE41 targets we splat the sign bit - a negative value will
    // set all bits of the lanes to true and VSELECT uses that in
    // its OR(AND(V0,C),AND(V1,~C)) lowering.
    SDValue C =
        DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
    return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1);
  };
  // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
  if (Subtarget->hasSSE41()) {
    // On SSE41 targets we need to replicate the shift mask in both
    // bytes for PBLENDVB.
    Amt = DAG.getNode(
        ISD::OR, dl, VT,
        DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
        DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
  } else {
    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
  }
19115 // r = VSELECT(r, shift(r, 8), a);
19116 SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
19117 R = SignBitSelect(Amt, M, R);
19120 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
19122 // r = VSELECT(r, shift(r, 4), a);
19123 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
19124 R = SignBitSelect(Amt, M, R);
19127 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
19129 // r = VSELECT(r, shift(r, 2), a);
19130 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
19131 R = SignBitSelect(Amt, M, R);
19134 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
19136 // return VSELECT(r, shift(r, 1), a);
    M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
    R = SignBitSelect(Amt, M, R);
    return R;
  }
19142 // Decompose 256-bit shifts into smaller 128-bit shifts.
19143 if (VT.is256BitVector()) {
19144 unsigned NumElems = VT.getVectorNumElements();
19145 MVT EltVT = VT.getVectorElementType();
19146 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19148 // Extract the two vectors
19149 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19150 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19152 // Recreate the shift amount vectors
19153 SDValue Amt1, Amt2;
19154 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19155 // Constant shift amount
19156 SmallVector<SDValue, 8> Ops(Amt->op_begin(), Amt->op_begin() + NumElems);
19157 ArrayRef<SDValue> Amt1Csts = makeArrayRef(Ops).slice(0, NumElems / 2);
19158 ArrayRef<SDValue> Amt2Csts = makeArrayRef(Ops).slice(NumElems / 2);
19160 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
    } else {
      // Variable shift amount.
      Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
      Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
    }
19168 // Issue new vector shifts for the smaller types
19169 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19170 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19172 // Concatenate the result back
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}
static SDValue LowerRotate(SDValue Op, const X86Subtarget *Subtarget,
                           SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
19186 assert(VT.isVector() && "Custom lowering only for vector rotates!");
19187 assert(Subtarget->hasXOP() && "XOP support required for vector rotates!");
19188 assert((Op.getOpcode() == ISD::ROTL) && "Only ROTL supported");
19190 // XOP has 128-bit vector variable + immediate rotates.
19191 // +ve/-ve Amt = rotate left/right.
19193 // Split 256-bit integers.
19194 if (VT.is256BitVector())
19195 return Lower256IntArith(Op, DAG);
19197 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
19199 // Attempt to rotate by immediate.
19200 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
19201 if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
19202 uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
19203 assert(RotateAmt < VT.getScalarSizeInBits() && "Rotation out of range");
      return DAG.getNode(X86ISD::VPROTI, DL, VT, R,
                         DAG.getConstant(RotateAmt, DL, MVT::i8));
    }
  }

  // Use general rotate by variable (per-element).
  return DAG.getNode(X86ISD::VPROT, DL, VT, R, Amt);
}
19213 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19214 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19215 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19216 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19217 // has only one use.
19218 SDNode *N = Op.getNode();
19219 SDValue LHS = N->getOperand(0);
19220 SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  X86::CondCode Cond;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (isOneConstant(RHS)) {
      BaseOp = X86ISD::INC;
      Cond = X86::COND_O;
      break;
    }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
    if (isOneConstant(RHS)) {
      BaseOp = X86ISD::DEC;
      Cond = X86::COND_O;
      break;
    }
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
    if (N->getValueType(0) == MVT::i8) {
      BaseOp = X86ISD::UMUL8;
      Cond = X86::COND_O;
      break;
    }
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);

    SDValue SetCC =
        DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                    DAG.getConstant(X86::COND_O, DL, MVT::i32),
                    SDValue(Sum.getNode(), 2));

    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
  }
  }
19279 // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
                  DAG.getConstant(Cond, DL, MVT::i32),
                  SDValue(Sum.getNode(), 1));

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
19291 /// Returns true if the operand type is exactly twice the native width, and
19292 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19293 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19294 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
  else if (OpWidth == 128)
    return Subtarget->hasCmpxchg16b();

  return false;
}
19306 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  return needsCmpXchgNb(SI->getValueOperand()->getType());
}
19310 // Note: this turns large loads into lock cmpxchg8b/16b.
19311 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19312 TargetLowering::AtomicExpansionKind
19313 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19314 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
  return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
                                               : AtomicExpansionKind::None;
}
19319 TargetLowering::AtomicExpansionKind
19320 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19321 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19322 Type *MemType = AI->getType();
19324 // If the operand is too big, we must see if cmpxchg8/16b is available
19325 // and default to library calls otherwise.
19326 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
19327 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
19328 : AtomicExpansionKind::None;
  }

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
19335 case AtomicRMWInst::Xchg:
19336 case AtomicRMWInst::Add:
19337 case AtomicRMWInst::Sub:
19338 // It's better to use xadd, xsub or xchg for these in all cases.
19339 return AtomicExpansionKind::None;
19340 case AtomicRMWInst::Or:
19341 case AtomicRMWInst::And:
19342 case AtomicRMWInst::Xor:
19343 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19344 // prefix to a normal instruction for these operations.
19345 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
19346 : AtomicExpansionKind::None;
19347 case AtomicRMWInst::Nand:
19348 case AtomicRMWInst::Max:
19349 case AtomicRMWInst::Min:
19350 case AtomicRMWInst::UMax:
19351 case AtomicRMWInst::UMin:
    // These always require a non-trivial set of data operations on x86. We
    // must use a cmpxchg loop.
    return AtomicExpansionKind::CmpXChg;
  }
}
static bool hasMFENCE(const X86Subtarget& Subtarget) {
  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  // no-sse2). There isn't any reason to disable it if the target processor
  // supports it.
  return Subtarget.hasSSE2() || Subtarget.is64Bit();
}

LoadInst *
19366 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19367 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19368 Type *MemType = AI->getType();
19369 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19370 // there is no benefit in turning such RMWs into loads, and it is actually
19371 // harmful as it introduces a mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  auto Builder = IRBuilder<>(AI);
19376 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19377 auto SynchScope = AI->getSynchScope();
19378 // We must restrict the ordering to avoid generating loads with Release or
19379 // ReleaseAcquire orderings.
19380 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19381 auto Ptr = AI->getPointerOperand();
  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //  x.store(1, relaxed);
  //  r1 = y.fetch_add(0, release);
  // Thread 1:
  //  y.fetch_add(42, acquire);
  //  r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19393 // lowered to just a load without a fence. A mfence flushes the store buffer,
19394 // making the optimization clearly correct.
19395 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19396 // otherwise, we might be able to be more aggressive on relaxed idempotent
19397 // rmw. In practice, they do not look useful, so we don't try to be
19398 // especially clever.
  if (SynchScope == SingleThread)
    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;

  if (!hasMFENCE(*Subtarget))
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;

  Function *MFence =
      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
  Builder.CreateCall(MFence, {});
19415 // Finally we can emit the atomic load.
19416 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19417 AI->getType()->getPrimitiveSizeInBits());
19418 Loaded->setAtomic(Order, SynchScope);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19432 // The only fence that needs an instruction is a sequentially-consistent
19433 // cross-thread fence.
19434 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19435 if (hasMFENCE(*Subtarget))
19436 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32),     // Base
      DAG.getTargetConstant(1, dl, MVT::i8),   // Scale
      DAG.getRegister(0, MVT::i32),            // Index
      DAG.getTargetConstant(0, dl, MVT::i32),  // Disp
      DAG.getRegister(0, MVT::i32),            // Segment.
      Zero,
      Chain
    };
    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
    return SDValue(Res, 0);
  }
  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
19457 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19458 SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
19464 default: llvm_unreachable("Invalid value type!");
19465 case MVT::i8: Reg = X86::AL; size = 1; break;
19466 case MVT::i16: Reg = X86::AX; size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget->is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
19473 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19474 Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, DL, MVT::i8),
19479 cpIn.getValue(1) };
19480 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19481 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);

  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19487 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19488 MVT::i32, cpOut.getValue(2));
19489 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
                                DAG.getConstant(X86::COND_E, DL, MVT::i8),
                                EFLAGS);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
  return SDValue();
}
19499 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19500 SelectionDAG &DAG) {
19501 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19502 MVT DstVT = Op.getSimpleValueType();
19504 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19505 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (DstVT != MVT::f64)
      // This conversion needs to be expanded.
      return SDValue();

    SDValue InVec = Op->getOperand(0);
    SDLoc dl(Op);
    unsigned NumElts = SrcVT.getVectorNumElements();
    MVT SVT = SrcVT.getVectorElementType();
19515 // Widen the vector in input in the case of MVT::v2i32.
19516 // Example: from MVT::v2i32 to MVT::v4i32.
19517 SmallVector<SDValue, 16> Elts;
19518 for (unsigned i = 0, e = NumElts; i != e; ++i)
19519 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19520 DAG.getIntPtrConstant(i, dl)));
19522 // Explicitly mark the extra elements as Undef.
19523 Elts.append(NumElts, DAG.getUNDEF(SVT));
19525 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19526 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19527 SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
                       DAG.getIntPtrConstant(0, dl));
  }

  assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
         Subtarget->hasMMX() && "Unexpected custom BITCAST");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits() == 64)) &&
         "Unexpected custom BITCAST");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT == MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT == MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
  if (SrcVT.isVector() && DstVT.isVector())
    return Op;
  // All other conversions need to be expanded.
  return SDValue();
}
19549 /// Compute the horizontal sum of bytes in V for the elements of VT.
19551 /// Requires V to be a byte vector and VT to be an integer vector type with
19552 /// wider elements than V's type. The width of the elements of VT determines
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
                                      const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  SDLoc DL(V);
  MVT ByteVecVT = V.getSimpleValueType();
19560 MVT EltVT = VT.getVectorElementType();
19561 int NumElts = VT.getVectorNumElements();
19562 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
19563 "Expected value to have byte element type.");
19564 assert(EltVT != MVT::i8 &&
19565 "Horizontal byte sum only makes sense for wider elements!");
19566 unsigned VecSize = VT.getSizeInBits();
19567 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
19569 // PSADBW instruction horizontally add all bytes and leave the result in i64
19570 // chunks, thus directly computes the pop count for v2i64 and v4i64.
19571 if (EltVT == MVT::i64) {
19572 SDValue Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
19573 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
19574 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
    return DAG.getBitcast(VT, V);
  }

  if (EltVT == MVT::i32) {
19579 // We unpack the low half and high half into i32s interleaved with zeros so
19580 // that we can use PSADBW to horizontally sum them. The most useful part of
19581 // this is that it lines up the results of two PSADBW instructions to be
19582 // two v2i64 vectors which concatenated are the 4 population counts. We can
19583 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
19584 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, DL);
19585 SDValue Low = DAG.getNode(X86ISD::UNPCKL, DL, VT, V, Zeros);
19586 SDValue High = DAG.getNode(X86ISD::UNPCKH, DL, VT, V, Zeros);
19588 // Do the horizontal sums into two v2i64s.
19589 Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
19590 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
19591 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
19592 DAG.getBitcast(ByteVecVT, Low), Zeros);
19593 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
19594 DAG.getBitcast(ByteVecVT, High), Zeros);
19596 // Merge them together.
19597 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
19598 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
19599 DAG.getBitcast(ShortVecVT, Low),
19600 DAG.getBitcast(ShortVecVT, High));
    return DAG.getBitcast(VT, V);
  }

  // The only element type left is i16.
  assert(EltVT == MVT::i16 && "Unknown how to handle type");
19608 // To obtain pop count for each i16 element starting from the pop count for
19609 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
19610 // right by 8. It is important to shift as i16s as i8 vector shift isn't
19611 // directly supported.
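  // E.g. an i16 lane whose two byte counts are <3, 2> (value 0x0203): the
  // SHL by 8 gives 0x0300, the byte-wise ADD gives 0x0503, and the final
  // SRL by 8 leaves 0x0005 - the horizontal sum of both byte counts.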
19612 SmallVector<SDValue, 16> Shifters(NumElts, DAG.getConstant(8, DL, EltVT));
19613 SDValue Shifter = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters);
19614 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), Shifter);
19615 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
19616 DAG.getBitcast(ByteVecVT, V));
  return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), Shifter);
}
19620 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
19621 const X86Subtarget *Subtarget,
19622 SelectionDAG &DAG) {
19623 MVT VT = Op.getSimpleValueType();
19624 MVT EltVT = VT.getVectorElementType();
19625 unsigned VecSize = VT.getSizeInBits();
19627 // Implement a lookup table in register by using an algorithm based on:
19628 // http://wm.ite.pl/articles/sse-popcount.html
19630 // The general idea is that every lower byte nibble in the input vector is an
19631 // index into a in-register pre-computed pop count table. We then split up the
19632 // input vector in two new ones: (1) a vector with only the shifted-right
19633 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
  // masked out higher ones) for each byte. PSHUFB is used separately with
  // both to index the in-register table. Next, both are added and the result
  // is an i8 vector where each element contains the pop count for the input
  // byte.
19638 // To obtain the pop count for elements != i8, we follow up with the same
19639 // approach and use additional tricks as described below.
19641 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
19642 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
19643 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
19644 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
19646 int NumByteElts = VecSize / 8;
19647 MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
19648 SDValue In = DAG.getBitcast(ByteVecVT, Op);
19649 SmallVector<SDValue, 16> LUTVec;
19650 for (int i = 0; i < NumByteElts; ++i)
19651 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
19652 SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, LUTVec);
19653 SmallVector<SDValue, 16> Mask0F(NumByteElts,
19654 DAG.getConstant(0x0F, DL, MVT::i8));
19655 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Mask0F);
19658 SmallVector<SDValue, 16> Four(NumByteElts, DAG.getConstant(4, DL, MVT::i8));
19659 SDValue FourV = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Four);
19660 SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
19663 SDValue LowNibbles = DAG.getNode(ISD::AND, DL, ByteVecVT, In, M0F);
19665 // The input vector is used as the shuffle mask that index elements into the
19666 // LUT. After counting low and high nibbles, add the vector to obtain the
19667 // final pop count per i8 element.
19668 SDValue HighPopCnt =
19669 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, HighNibbles);
19670 SDValue LowPopCnt =
19671 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, LowNibbles);
19672 SDValue PopCnt = DAG.getNode(ISD::ADD, DL, ByteVecVT, HighPopCnt, LowPopCnt);
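  // E.g. for the input byte 0xB6, PSHUFB selects LUT[0xB] = 3 for the high
  // nibble and LUT[0x6] = 2 for the low nibble; 3 + 2 = 5 is indeed
  // popcount(0xB6).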
  if (EltVT == MVT::i8)
    return PopCnt;

  return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
}
19680 static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
19681 const X86Subtarget *Subtarget,
19682 SelectionDAG &DAG) {
19683 MVT VT = Op.getSimpleValueType();
19684 assert(VT.is128BitVector() &&
19685 "Only 128-bit vector bitmath lowering supported.");
19687 int VecSize = VT.getSizeInBits();
19688 MVT EltVT = VT.getVectorElementType();
19689 int Len = EltVT.getSizeInBits();
19691 // This is the vectorized version of the "best" algorithm from
19692 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19693 // with a minor tweak to use a series of adds + shifts instead of vector
19694 // multiplications. Implemented for all integer vector types. We only use
19695 // this when we don't have SSSE3 which allows a LUT-based lowering that is
19696 // much faster, even faster than using native popcnt instructions.
19698 auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
19699 MVT VT = V.getSimpleValueType();
19700 SmallVector<SDValue, 32> Shifters(
19701 VT.getVectorNumElements(),
19702 DAG.getConstant(Shifter, DL, VT.getVectorElementType()));
    return DAG.getNode(OpCode, DL, VT, V,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters));
  };

  auto GetMask = [&](SDValue V, APInt Mask) {
19707 MVT VT = V.getSimpleValueType();
19708 SmallVector<SDValue, 32> Masks(
19709 VT.getVectorNumElements(),
19710 DAG.getConstant(Mask, DL, VT.getVectorElementType()));
    return DAG.getNode(ISD::AND, DL, VT, V,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Masks));
  };

  // We don't want to incur the implicit masks required to SRL vNi8 vectors on
  // x86, so set the SRL type to have elements at least i16 wide. This is
  // correct because all of our SRLs are followed immediately by a mask anyways
  // that handles any bits that sneak into the high bits of the byte elements.
  MVT SrlVT = Len > 8 ? VT : MVT::getVectorVT(MVT::i16, VecSize / 16);

  SDValue V = Op;

  // v = v - ((v >> 1) & 0x55555555...)
  SDValue Srl =
      DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 1));
19726 SDValue And = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x55)));
19727 V = DAG.getNode(ISD::SUB, DL, VT, V, And);
19729 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19730 SDValue AndLHS = GetMask(V, APInt::getSplat(Len, APInt(8, 0x33)));
19731 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 2));
19732 SDValue AndRHS = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x33)));
19733 V = DAG.getNode(ISD::ADD, DL, VT, AndLHS, AndRHS);
19735 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19736 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 4));
19737 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, V, Srl);
19738 V = GetMask(Add, APInt::getSplat(Len, APInt(8, 0x0F)));
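// As a concrete trace of the three steps on the byte 0xDB (six set bits):
//   0xDB - ((0xDB >> 1) & 0x55) = 0xDB - 0x45 = 0x96  (2-bit field counts)
//   (0x96 & 0x33) + ((0x96 >> 2) & 0x33) = 0x12 + 0x21 = 0x33  (nibble counts)
//   (0x33 + (0x33 >> 4)) & 0x0F = 0x06  (final byte-wise count)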
19740 // At this point, V contains the byte-wise population count, and we are
19741 // merely doing a horizontal sum if necessary to get the wider element
19742 // conversion.
19743 if (EltVT == MVT::i8)
19744 return V;
19746 return LowerHorizontalByteSum(
19747 DAG.getBitcast(MVT::getVectorVT(MVT::i8, VecSize / 8), V), VT, Subtarget,
19748 DAG);
19749 }
19751 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19752 SelectionDAG &DAG) {
19753 MVT VT = Op.getSimpleValueType();
19754 // FIXME: Need to add AVX-512 support here!
19755 assert((VT.is256BitVector() || VT.is128BitVector()) &&
19756 "Unknown CTPOP type to handle");
19757 SDLoc DL(Op.getNode());
19758 SDValue Op0 = Op.getOperand(0);
19760 if (!Subtarget->hasSSSE3()) {
19761 // We can't use the fast LUT approach, so fall back on vectorized bitmath.
19762 assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
19763 return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
19764 }
19766 if (VT.is256BitVector() && !Subtarget->hasInt256()) {
19767 unsigned NumElems = VT.getVectorNumElements();
19769 // Extract each 128-bit vector, compute pop count and concat the result.
19770 SDValue LHS = Extract128BitVector(Op0, 0, DAG, DL);
19771 SDValue RHS = Extract128BitVector(Op0, NumElems/2, DAG, DL);
19773 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
19774 LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
19775 LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
19776 }
19778 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
19779 }
19781 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19782 SelectionDAG &DAG) {
19783 assert(Op.getSimpleValueType().isVector() &&
19784 "We only do custom lowering for vector population count.");
19785 return LowerVectorCTPOP(Op, Subtarget, DAG);
19786 }
19788 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19789 SDNode *Node = Op.getNode();
19790 SDLoc dl(Node);
19791 EVT T = Node->getValueType(0);
19792 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19793 DAG.getConstant(0, dl, T), Node->getOperand(2));
19794 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19795 cast<AtomicSDNode>(Node)->getMemoryVT(),
19796 Node->getOperand(0),
19797 Node->getOperand(1), negOp,
19798 cast<AtomicSDNode>(Node)->getMemOperand(),
19799 cast<AtomicSDNode>(Node)->getOrdering(),
19800 cast<AtomicSDNode>(Node)->getSynchScope());
19801 }
19803 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19804 SDNode *Node = Op.getNode();
19805 SDLoc dl(Node);
19806 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19808 // Convert seq_cst store -> xchg
19809 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19810 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19811 // (The only way to get a 16-byte store is cmpxchg16b)
19812 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19813 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19814 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19815 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19816 cast<AtomicSDNode>(Node)->getMemoryVT(),
19817 Node->getOperand(0),
19818 Node->getOperand(1), Node->getOperand(2),
19819 cast<AtomicSDNode>(Node)->getMemOperand(),
19820 cast<AtomicSDNode>(Node)->getOrdering(),
19821 cast<AtomicSDNode>(Node)->getSynchScope());
19822 return Swap.getValue(1);
19823 }
19824 // Other atomic stores have a simple pattern.
19825 return Op;
19826 }
19828 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19829 MVT VT = Op.getNode()->getSimpleValueType(0);
19831 // Let legalize expand this if it isn't a legal type yet.
19832 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19833 return SDValue();
19835 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19837 unsigned Opc;
19838 bool ExtraOp = false;
19839 switch (Op.getOpcode()) {
19840 default: llvm_unreachable("Invalid code");
19841 case ISD::ADDC: Opc = X86ISD::ADD; break;
19842 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19843 case ISD::SUBC: Opc = X86ISD::SUB; break;
19844 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19845 }
19847 if (!ExtraOp)
19848 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19849 Op.getOperand(1));
19850 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19851 Op.getOperand(1), Op.getOperand(2));
19852 }
19854 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19855 SelectionDAG &DAG) {
19856 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19858 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19859 // which returns the values as { float, float } (in XMM0) or
19860 // { double, double } (which is returned in XMM0, XMM1).
19861 SDLoc dl(Op);
19862 SDValue Arg = Op.getOperand(0);
19863 EVT ArgVT = Arg.getValueType();
19864 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19866 TargetLowering::ArgListTy Args;
19867 TargetLowering::ArgListEntry Entry;
19869 Entry.Node = Arg;
19870 Entry.Ty = ArgTy;
19871 Entry.isSExt = false;
19872 Entry.isZExt = false;
19873 Args.push_back(Entry);
19875 bool isF64 = ArgVT == MVT::f64;
19876 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19877 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19878 // the results are returned via SRet in memory.
19879 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19880 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19881 SDValue Callee =
19882 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
19884 Type *RetTy = isF64
19885 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19886 : (Type*)VectorType::get(ArgTy, 4);
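// For f32 the call is typed as returning <4 x float> so that both results
// come back packed in xmm0 (lanes 0 and 1); for f64 the {double, double}
// struct is returned split across xmm0 and xmm1, matching the comment above.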
19888 TargetLowering::CallLoweringInfo CLI(DAG);
19889 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19890 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19892 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19894 if (isF64)
19895 // Returned in xmm0 and xmm1.
19896 return CallResult.first;
19898 // Returned in bits 0:31 and 32:63 of xmm0.
19899 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19900 CallResult.first, DAG.getIntPtrConstant(0, dl));
19901 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19902 CallResult.first, DAG.getIntPtrConstant(1, dl));
19903 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19904 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19905 }
19907 /// Widen a vector input to a vector of NVT. The
19908 /// input vector must have the same element type as NVT.
19909 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
19910 bool FillWithZeroes = false) {
19911 // Check if InOp already has the right width.
19912 MVT InVT = InOp.getSimpleValueType();
19913 if (InVT == NVT)
19914 return InOp;
19916 if (InOp.isUndef())
19917 return DAG.getUNDEF(NVT);
19919 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
19920 "input and widen element type must match");
19922 unsigned InNumElts = InVT.getVectorNumElements();
19923 unsigned WidenNumElts = NVT.getVectorNumElements();
19924 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
19925 "Unexpected request for vector widening");
19927 EVT EltVT = NVT.getVectorElementType();
19929 SDLoc dl(InOp);
19930 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
19931 InOp.getNumOperands() == 2) {
19932 SDValue N1 = InOp.getOperand(1);
19933 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
19934 N1.isUndef()) {
19935 InOp = InOp.getOperand(0);
19936 InVT = InOp.getSimpleValueType();
19937 InNumElts = InVT.getVectorNumElements();
19938 }
19939 }
19940 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
19941 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
19942 SmallVector<SDValue, 16> Ops;
19943 for (unsigned i = 0; i < InNumElts; ++i)
19944 Ops.push_back(InOp.getOperand(i));
19946 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
19947 DAG.getUNDEF(EltVT);
19948 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
19949 Ops.push_back(FillVal);
19950 return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
19951 }
19952 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
19953 DAG.getUNDEF(NVT);
19954 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
19955 InOp, DAG.getIntPtrConstant(0, dl));
19956 }
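// As a usage sketch: ExtendToType(<2 x i64> X, v4i64, DAG, true) produces
// <4 x i64> {X[0], X[1], 0, 0}, while FillWithZeroes == false leaves the two
// upper lanes undef instead.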
19958 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
19959 SelectionDAG &DAG) {
19960 assert(Subtarget->hasAVX512() &&
19961 "MGATHER/MSCATTER are supported on AVX-512 arch only");
19963 // X86 scatter kills mask register, so its type should be added to
19964 // the list of return values.
19965 // If the "scatter" has 2 return values, it is already handled.
19966 if (Op.getNode()->getNumValues() == 2)
19967 return Op;
19969 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
19970 SDValue Src = N->getValue();
19971 MVT VT = Src.getSimpleValueType();
19972 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
19973 SDLoc dl(Op);
19975 SDValue NewScatter;
19976 SDValue Index = N->getIndex();
19977 SDValue Mask = N->getMask();
19978 SDValue Chain = N->getChain();
19979 SDValue BasePtr = N->getBasePtr();
19980 MVT MemVT = N->getMemoryVT().getSimpleVT();
19981 MVT IndexVT = Index.getSimpleValueType();
19982 MVT MaskVT = Mask.getSimpleValueType();
19984 if (MemVT.getScalarSizeInBits() < VT.getScalarSizeInBits()) {
19985 // The v2i32 value was promoted to v2i64.
19986 // Now we "redo" the type legalizer's work and widen the original
19987 // v2i32 value to v4i32. The original v2i32 is retrieved from v2i64
19988 // with a shuffle.
19989 assert((MemVT == MVT::v2i32 && VT == MVT::v2i64) &&
19990 "Unexpected memory type");
19991 int ShuffleMask[] = {0, 2, -1, -1};
19992 Src = DAG.getVectorShuffle(MVT::v4i32, dl, DAG.getBitcast(MVT::v4i32, Src),
19993 DAG.getUNDEF(MVT::v4i32), ShuffleMask);
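// Viewed as v4i32 (little-endian), the promoted v2i64 keeps the payload in
// lanes 0 and 2, so the {0, 2, -1, -1} mask compacts those lanes into the
// low half of a v4i32 and leaves the upper half undef.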
19994 // Now we have 4 elements instead of 2.
19995 // Expand the index.
19996 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), 4);
19997 Index = ExtendToType(Index, NewIndexVT, DAG);
19999 // Expand the mask with zeroes
20000 // Mask may be <2 x i64> or <2 x i1> at this moment
20001 assert((MaskVT == MVT::v2i1 || MaskVT == MVT::v2i64) &&
20002 "Unexpected mask type");
20003 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), 4);
20004 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
20005 VT = MVT::v4i32;
20006 }
20008 unsigned NumElts = VT.getVectorNumElements();
20009 if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
20010 !Index.getSimpleValueType().is512BitVector()) {
20011 // AVX512F supports only 512-bit vectors; either the data or the index
20012 // must be 512 bits wide. If both index and data are 256-bit here but the
20013 // vector contains 8 elements, we just sign-extend the index.
20014 if (IndexVT == MVT::v8i32)
20015 // Just extend index
20016 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
20017 else {
20018 // The minimal number of elts in scatter is 8
20019 NumElts = 8;
20021 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
20022 // Use original index here, do not modify the index twice
20023 Index = ExtendToType(N->getIndex(), NewIndexVT, DAG);
20024 if (IndexVT.getScalarType() == MVT::i32)
20025 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
20028 // At this point we have promoted mask operand
20029 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
20030 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
20031 // Use the original mask here, do not modify the mask twice
20032 Mask = ExtendToType(N->getMask(), ExtMaskVT, DAG, true);
20034 // The value that should be stored
20035 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
20036 Src = ExtendToType(Src, NewVT, DAG);
20037 }
20038 }
20039 // If the mask is "wide" at this point - truncate it to i1 vector
20040 MVT BitMaskVT = MVT::getVectorVT(MVT::i1, NumElts);
20041 Mask = DAG.getNode(ISD::TRUNCATE, dl, BitMaskVT, Mask);
20043 // The mask is killed by scatter, add it to the values
20044 SDVTList VTs = DAG.getVTList(BitMaskVT, MVT::Other);
20045 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index};
20046 NewScatter = DAG.getMaskedScatter(VTs, N->getMemoryVT(), dl, Ops,
20047 N->getMemOperand());
20048 DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
20049 return SDValue(NewScatter.getNode(), 0);
20050 }
20052 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget *Subtarget,
20053 SelectionDAG &DAG) {
20055 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
20056 MVT VT = Op.getSimpleValueType();
20057 SDValue Mask = N->getMask();
20059 SDLoc dl(Op);
20060 if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
20061 !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
20062 // This operation is legal for targets with VLX, but without
20063 // VLX the vector should be widened to 512 bits.
20064 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
20065 MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
20066 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
20067 SDValue Src0 = N->getSrc0();
20068 Src0 = ExtendToType(Src0, WideDataVT, DAG);
20069 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
20070 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
20071 N->getBasePtr(), Mask, Src0,
20072 N->getMemoryVT(), N->getMemOperand(),
20073 N->getExtensionType());
20075 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
20076 NewLoad.getValue(0),
20077 DAG.getIntPtrConstant(0, dl));
20078 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
20079 return DAG.getMergeValues(RetOps, dl);
20080 }
20082 return Op;
20083 }
20084 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget *Subtarget,
20085 SelectionDAG &DAG) {
20086 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
20087 SDValue DataToStore = N->getValue();
20088 MVT VT = DataToStore.getSimpleValueType();
20089 SDValue Mask = N->getMask();
20091 SDLoc dl(Op);
20092 if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
20093 !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
20094 // This operation is legal for targets with VLX, but without
20095 // VLX the vector should be widened to 512 bits.
20096 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
20097 MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
20098 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
20099 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
20100 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
20101 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
20102 Mask, N->getMemoryVT(), N->getMemOperand(),
20103 N->isTruncatingStore());
20104 }
20106 return Op;
20107 }
20108 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget *Subtarget,
20109 SelectionDAG &DAG) {
20110 assert(Subtarget->hasAVX512() &&
20111 "MGATHER/MSCATTER are supported on AVX-512 arch only");
20113 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
20114 SDLoc dl(Op);
20115 MVT VT = Op.getSimpleValueType();
20116 SDValue Index = N->getIndex();
20117 SDValue Mask = N->getMask();
20118 SDValue Src0 = N->getValue();
20119 MVT IndexVT = Index.getSimpleValueType();
20120 MVT MaskVT = Mask.getSimpleValueType();
20122 unsigned NumElts = VT.getVectorNumElements();
20123 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
20125 if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
20126 !Index.getSimpleValueType().is512BitVector()) {
20127 // AVX512F supports only 512-bit vectors; either the data or the index
20128 // must be 512 bits wide. If both index and data are 256-bit here but the
20129 // vector contains 8 elements, we just sign-extend the index.
20130 if (NumElts == 8) {
20131 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
20132 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
20133 N->getOperand(3), Index };
20134 DAG.UpdateNodeOperands(N, Ops);
20135 return Op;
20136 }
20138 // Minimal number of elements in Gather
20139 NumElts = 8;
20141 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
20142 Index = ExtendToType(Index, NewIndexVT, DAG);
20143 if (IndexVT.getScalarType() == MVT::i32)
20144 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
20147 MVT MaskBitVT = MVT::getVectorVT(MVT::i1, NumElts);
20148 // At this point we have promoted mask operand
20149 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
20150 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
20151 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
20152 Mask = DAG.getNode(ISD::TRUNCATE, dl, MaskBitVT, Mask);
20154 // The pass-thru value
20155 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
20156 Src0 = ExtendToType(Src0, NewVT, DAG);
20158 SDValue Ops[] = { N->getChain(), Src0, Mask, N->getBasePtr(), Index };
20159 SDValue NewGather = DAG.getMaskedGather(DAG.getVTList(NewVT, MVT::Other),
20160 N->getMemoryVT(), dl, Ops,
20161 N->getMemOperand());
20162 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
20163 NewGather.getValue(0),
20164 DAG.getIntPtrConstant(0, dl));
20165 SDValue RetOps[] = {Extract, NewGather.getValue(1)};
20166 return DAG.getMergeValues(RetOps, dl);
20167 }
20169 return Op;
20170 }
20171 SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
20172 SelectionDAG &DAG) const {
20173 // TODO: Eventually, the lowering of these nodes should be informed by or
20174 // deferred to the GC strategy for the function in which they appear. For
20175 // now, however, they must be lowered to something. Since they are logically
20176 // no-ops in the case of a null GC strategy (or a GC strategy which does not
20177 // require special handling for these nodes), lower them as literal NOOPs for
20178 // the time being.
20179 SmallVector<SDValue, 2> Ops;
20181 Ops.push_back(Op.getOperand(0));
20182 if (Op->getGluedNode())
20183 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
20186 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
20187 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
20189 return NOOP;
20190 }
20192 SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
20193 SelectionDAG &DAG) const {
20194 // TODO: Eventually, the lowering of these nodes should be informed by or
20195 // deferred to the GC strategy for the function in which they appear. For
20196 // now, however, they must be lowered to something. Since they are logically
20197 // no-ops in the case of a null GC strategy (or a GC strategy which does not
20198 // require special handling for these nodes), lower them as literal NOOPs for
20199 // the time being.
20200 SmallVector<SDValue, 2> Ops;
20202 Ops.push_back(Op.getOperand(0));
20203 if (Op->getGluedNode())
20204 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
20207 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
20208 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
20210 return NOOP;
20211 }
20213 /// LowerOperation - Provide custom lowering hooks for some operations.
20215 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
20216 switch (Op.getOpcode()) {
20217 default: llvm_unreachable("Should not custom lower this!");
20218 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
20219 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
20220 return LowerCMP_SWAP(Op, Subtarget, DAG);
20221 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
20222 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
20223 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
20224 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
20225 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
20226 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
20227 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
20228 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
20229 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
20230 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
20231 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20232 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20233 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20234 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20235 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20236 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20237 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20238 case ISD::SHL_PARTS:
20239 case ISD::SRA_PARTS:
20240 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20241 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20242 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20243 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20244 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20245 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20246 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20247 case ISD::SIGN_EXTEND_VECTOR_INREG:
20248 return LowerSIGN_EXTEND_VECTOR_INREG(Op, Subtarget, DAG);
20249 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20250 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20251 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20252 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20253 case ISD::FABS:
20254 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20255 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20256 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20257 case ISD::SETCC: return LowerSETCC(Op, DAG);
20258 case ISD::SETCCE: return LowerSETCCE(Op, DAG);
20259 case ISD::SELECT: return LowerSELECT(Op, DAG);
20260 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20261 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20262 case ISD::VASTART: return LowerVASTART(Op, DAG);
20263 case ISD::VAARG: return LowerVAARG(Op, DAG);
20264 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20265 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20266 case ISD::INTRINSIC_VOID:
20267 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20268 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20269 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20270 case ISD::FRAME_TO_ARGS_OFFSET:
20271 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20272 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20273 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20274 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20275 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20276 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20277 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20278 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20279 case ISD::CTLZ: return LowerCTLZ(Op, Subtarget, DAG);
20280 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, Subtarget, DAG);
20281 case ISD::CTTZ:
20282 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
20283 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20284 case ISD::UMUL_LOHI:
20285 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20286 case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG);
20287 case ISD::SRA:
20288 case ISD::SRL:
20289 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20290 case ISD::SADDO:
20291 case ISD::UADDO:
20292 case ISD::SSUBO:
20293 case ISD::USUBO:
20294 case ISD::SMULO:
20295 case ISD::UMULO: return LowerXALUO(Op, DAG);
20296 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20297 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20298 case ISD::ADDC:
20299 case ISD::ADDE:
20300 case ISD::SUBC:
20301 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20302 case ISD::ADD: return LowerADD(Op, DAG);
20303 case ISD::SUB: return LowerSUB(Op, DAG);
20304 case ISD::SMAX:
20305 case ISD::SMIN:
20306 case ISD::UMAX:
20307 case ISD::UMIN: return LowerMINMAX(Op, DAG);
20308 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20309 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
20310 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
20311 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
20312 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
20313 case ISD::GC_TRANSITION_START:
20314 return LowerGC_TRANSITION_START(Op, DAG);
20315 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
20316 }
20317 }
20319 /// ReplaceNodeResults - Replace a node with an illegal result type
20320 /// with a new node built out of custom code.
20321 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20322 SmallVectorImpl<SDValue>&Results,
20323 SelectionDAG &DAG) const {
20324 SDLoc dl(N);
20325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20326 switch (N->getOpcode()) {
20327 default:
20328 llvm_unreachable("Do not know how to custom type legalize this operation!");
20329 case X86ISD::AVG: {
20330 // Legalize types for X86ISD::AVG by expanding vectors.
20331 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20333 auto InVT = N->getValueType(0);
20334 auto InVTSize = InVT.getSizeInBits();
20335 const unsigned RegSize =
20336 (InVTSize > 128) ? ((InVTSize > 256) ? 512 : 256) : 128;
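// RegSize selects the narrowest register covering the input: value types of
// up to 128 bits map to 128, 129-256 bits to 256, and anything wider to 512.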
20337 assert((Subtarget->hasAVX512() || RegSize < 512) &&
20338 "512-bit vector requires AVX512");
20339 assert((Subtarget->hasAVX2() || RegSize < 256) &&
20340 "256-bit vector requires AVX2");
20342 auto ElemVT = InVT.getVectorElementType();
20343 auto RegVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
20344 RegSize / ElemVT.getSizeInBits());
20345 assert(RegSize % InVT.getSizeInBits() == 0);
20346 unsigned NumConcat = RegSize / InVT.getSizeInBits();
20348 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
20349 Ops[0] = N->getOperand(0);
20350 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
20351 Ops[0] = N->getOperand(1);
20352 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
20354 SDValue Res = DAG.getNode(X86ISD::AVG, dl, RegVT, InVec0, InVec1);
20355 Results.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InVT, Res,
20356 DAG.getIntPtrConstant(0, dl)));
20357 return;
20358 }
20359 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20360 case X86ISD::FMINC:
20361 case X86ISD::FMIN:
20362 case X86ISD::FMAXC:
20363 case X86ISD::FMAX: {
20364 EVT VT = N->getValueType(0);
20365 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
20366 SDValue UNDEF = DAG.getUNDEF(VT);
20367 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20368 N->getOperand(0), UNDEF);
20369 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20370 N->getOperand(1), UNDEF);
20371 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20372 return;
20373 }
20374 case ISD::SIGN_EXTEND_INREG:
20375 case ISD::ADDC:
20376 case ISD::ADDE:
20377 case ISD::SUBC:
20378 case ISD::SUBE:
20379 // We don't want to expand or promote these.
20380 return;
20381 case ISD::SDIV:
20382 case ISD::UDIV:
20383 case ISD::SREM:
20384 case ISD::UREM:
20385 case ISD::SDIVREM:
20386 case ISD::UDIVREM: {
20387 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20388 Results.push_back(V);
20389 return;
20390 }
20391 case ISD::FP_TO_SINT:
20392 case ISD::FP_TO_UINT: {
20393 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20395 std::pair<SDValue,SDValue> Vals =
20396 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20397 SDValue FIST = Vals.first, StackSlot = Vals.second;
20398 if (FIST.getNode()) {
20399 EVT VT = N->getValueType(0);
20400 // Return a load from the stack slot.
20401 if (StackSlot.getNode())
20402 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20403 MachinePointerInfo(),
20404 false, false, false, 0));
20405 else
20406 Results.push_back(FIST);
20407 }
20408 return;
20409 }
20410 case ISD::UINT_TO_FP: {
20411 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20412 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20413 N->getValueType(0) != MVT::v2f32)
20414 return;
20415 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20416 N->getOperand(0));
20417 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
20418 MVT::f64);
20419 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20420 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20421 DAG.getBitcast(MVT::v2i64, VBias));
20422 Or = DAG.getBitcast(MVT::v2f64, Or);
20423 // TODO: Are there any fast-math-flags to propagate here?
20424 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
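// This is the standard double-precision bias trick: for a 32-bit unsigned x,
// the bit pattern 0x43300000_xxxxxxxx is exactly the double 2^52 + x, so
// subtracting VBias (2^52) recovers x converted to f64 with no int-to-fp op.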
20425 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20426 return;
20427 }
20428 case ISD::FP_ROUND: {
20429 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20430 return;
20431 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20432 Results.push_back(V);
20433 return;
20434 }
20435 case ISD::FP_EXTEND: {
20436 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
20437 // No other ValueType for FP_EXTEND should reach this point.
20438 assert(N->getValueType(0) == MVT::v2f32 &&
20439 "Do not know how to legalize this Node");
20442 case ISD::INTRINSIC_W_CHAIN: {
20443 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20444 switch (IntNo) {
20445 default : llvm_unreachable("Do not know how to custom type "
20446 "legalize this intrinsic operation!");
20447 case Intrinsic::x86_rdtsc:
20448 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20449 Results);
20450 case Intrinsic::x86_rdtscp:
20451 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20452 Results);
20453 case Intrinsic::x86_rdpmc:
20454 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20455 }
20456 }
20457 case ISD::INTRINSIC_WO_CHAIN: {
20458 if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), Subtarget, DAG))
20459 Results.push_back(V);
20460 return;
20461 }
20462 case ISD::READCYCLECOUNTER: {
20463 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20464 Results);
20465 }
20466 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20467 EVT T = N->getValueType(0);
20468 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20469 bool Regs64bit = T == MVT::i128;
20470 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20471 SDValue cpInL, cpInH;
20472 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20473 DAG.getConstant(0, dl, HalfT));
20474 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20475 DAG.getConstant(1, dl, HalfT));
20476 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20477 Regs64bit ? X86::RAX : X86::EAX,
20478 cpInL, SDValue());
20479 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20480 Regs64bit ? X86::RDX : X86::EDX,
20481 cpInH, cpInL.getValue(1));
20482 SDValue swapInL, swapInH;
20483 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20484 DAG.getConstant(0, dl, HalfT));
20485 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20486 DAG.getConstant(1, dl, HalfT));
20487 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20488 Regs64bit ? X86::RBX : X86::EBX,
20489 swapInL, cpInH.getValue(1));
20490 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20491 Regs64bit ? X86::RCX : X86::ECX,
20492 swapInH, swapInL.getValue(1));
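// CMPXCHG8B/CMPXCHG16B implicitly take the expected value in EDX:EAX
// (RDX:RAX) and the replacement in ECX:EBX (RCX:RBX), which is why both
// halves of each operand were threaded through CopyToReg glue above.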
20493 SDValue Ops[] = { swapInH.getValue(0),
20494 N->getOperand(1),
20495 swapInH.getValue(1) };
20496 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20497 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20498 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20499 X86ISD::LCMPXCHG8_DAG;
20500 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20501 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20502 Regs64bit ? X86::RAX : X86::EAX,
20503 HalfT, Result.getValue(1));
20504 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20505 Regs64bit ? X86::RDX : X86::EDX,
20506 HalfT, cpOutL.getValue(2));
20507 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20509 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20510 MVT::i32, cpOutH.getValue(2));
20511 SDValue Success =
20512 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20513 DAG.getConstant(X86::COND_E, dl, MVT::i8), EFLAGS);
20514 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20516 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20517 Results.push_back(Success);
20518 Results.push_back(EFLAGS.getValue(1));
20519 return;
20520 }
20521 case ISD::ATOMIC_SWAP:
20522 case ISD::ATOMIC_LOAD_ADD:
20523 case ISD::ATOMIC_LOAD_SUB:
20524 case ISD::ATOMIC_LOAD_AND:
20525 case ISD::ATOMIC_LOAD_OR:
20526 case ISD::ATOMIC_LOAD_XOR:
20527 case ISD::ATOMIC_LOAD_NAND:
20528 case ISD::ATOMIC_LOAD_MIN:
20529 case ISD::ATOMIC_LOAD_MAX:
20530 case ISD::ATOMIC_LOAD_UMIN:
20531 case ISD::ATOMIC_LOAD_UMAX:
20532 case ISD::ATOMIC_LOAD: {
20533 // Delegate to generic TypeLegalization. Situations we can really handle
20534 // should have already been dealt with by AtomicExpandPass.cpp.
20535 break;
20536 }
20537 case ISD::BITCAST: {
20538 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20539 EVT DstVT = N->getValueType(0);
20540 EVT SrcVT = N->getOperand(0)->getValueType(0);
20542 if (SrcVT != MVT::f64 ||
20543 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20544 return;
20546 unsigned NumElts = DstVT.getVectorNumElements();
20547 EVT SVT = DstVT.getVectorElementType();
20548 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20549 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20550 MVT::v2f64, N->getOperand(0));
20551 SDValue ToVecInt = DAG.getBitcast(WiderVT, Expanded);
20553 if (ExperimentalVectorWideningLegalization) {
20554 // If we are legalizing vectors by widening, we already have the desired
20555 // legal vector type, just return it.
20556 Results.push_back(ToVecInt);
20557 return;
20558 }
20560 SmallVector<SDValue, 8> Elts;
20561 for (unsigned i = 0, e = NumElts; i != e; ++i)
20562 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20563 ToVecInt, DAG.getIntPtrConstant(i, dl)));
20565 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20566 }
20567 }
20568 }
20570 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20571 switch ((X86ISD::NodeType)Opcode) {
20572 case X86ISD::FIRST_NUMBER: break;
20573 case X86ISD::BSF: return "X86ISD::BSF";
20574 case X86ISD::BSR: return "X86ISD::BSR";
20575 case X86ISD::SHLD: return "X86ISD::SHLD";
20576 case X86ISD::SHRD: return "X86ISD::SHRD";
20577 case X86ISD::FAND: return "X86ISD::FAND";
20578 case X86ISD::FANDN: return "X86ISD::FANDN";
20579 case X86ISD::FOR: return "X86ISD::FOR";
20580 case X86ISD::FXOR: return "X86ISD::FXOR";
20581 case X86ISD::FILD: return "X86ISD::FILD";
20582 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20583 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20584 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20585 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20586 case X86ISD::FLD: return "X86ISD::FLD";
20587 case X86ISD::FST: return "X86ISD::FST";
20588 case X86ISD::CALL: return "X86ISD::CALL";
20589 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20590 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20591 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20592 case X86ISD::BT: return "X86ISD::BT";
20593 case X86ISD::CMP: return "X86ISD::CMP";
20594 case X86ISD::COMI: return "X86ISD::COMI";
20595 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20596 case X86ISD::CMPM: return "X86ISD::CMPM";
20597 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20598 case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND";
20599 case X86ISD::SETCC: return "X86ISD::SETCC";
20600 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20601 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20602 case X86ISD::FGETSIGNx86: return "X86ISD::FGETSIGNx86";
20603 case X86ISD::CMOV: return "X86ISD::CMOV";
20604 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20605 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20606 case X86ISD::IRET: return "X86ISD::IRET";
20607 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20608 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20609 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20610 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20611 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20612 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
20613 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
20614 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
20615 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20616 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20617 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20618 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20619 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20620 case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
20621 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20622 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20623 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20624 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20625 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20626 case X86ISD::ADDUS: return "X86ISD::ADDUS";
20627 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20628 case X86ISD::HADD: return "X86ISD::HADD";
20629 case X86ISD::HSUB: return "X86ISD::HSUB";
20630 case X86ISD::FHADD: return "X86ISD::FHADD";
20631 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20632 case X86ISD::ABS: return "X86ISD::ABS";
20633 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
20634 case X86ISD::FMAX: return "X86ISD::FMAX";
20635 case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND";
20636 case X86ISD::FMIN: return "X86ISD::FMIN";
20637 case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND";
20638 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20639 case X86ISD::FMINC: return "X86ISD::FMINC";
20640 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20641 case X86ISD::FRCP: return "X86ISD::FRCP";
20642 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
20643 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
20644 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20645 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20646 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20647 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20648 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20649 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20650 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20651 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20652 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20653 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20654 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20655 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20656 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20657 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20658 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20659 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20660 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20661 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
20662 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
20663 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20664 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20665 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20666 case X86ISD::CVTDQ2PD: return "X86ISD::CVTDQ2PD";
20667 case X86ISD::CVTUDQ2PD: return "X86ISD::CVTUDQ2PD";
20668 case X86ISD::CVT2MASK: return "X86ISD::CVT2MASK";
20669 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20670 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20671 case X86ISD::VSHL: return "X86ISD::VSHL";
20672 case X86ISD::VSRL: return "X86ISD::VSRL";
20673 case X86ISD::VSRA: return "X86ISD::VSRA";
20674 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20675 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20676 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20677 case X86ISD::CMPP: return "X86ISD::CMPP";
20678 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20679 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20680 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20681 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20682 case X86ISD::ADD: return "X86ISD::ADD";
20683 case X86ISD::SUB: return "X86ISD::SUB";
20684 case X86ISD::ADC: return "X86ISD::ADC";
20685 case X86ISD::SBB: return "X86ISD::SBB";
20686 case X86ISD::SMUL: return "X86ISD::SMUL";
20687 case X86ISD::UMUL: return "X86ISD::UMUL";
20688 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20689 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20690 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20691 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20692 case X86ISD::INC: return "X86ISD::INC";
20693 case X86ISD::DEC: return "X86ISD::DEC";
20694 case X86ISD::OR: return "X86ISD::OR";
20695 case X86ISD::XOR: return "X86ISD::XOR";
20696 case X86ISD::AND: return "X86ISD::AND";
20697 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20698 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20699 case X86ISD::PTEST: return "X86ISD::PTEST";
20700 case X86ISD::TESTP: return "X86ISD::TESTP";
20701 case X86ISD::TESTM: return "X86ISD::TESTM";
20702 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20703 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20704 case X86ISD::KTEST: return "X86ISD::KTEST";
20705 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20706 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20707 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20708 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20709 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20710 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20711 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20712 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20713 case X86ISD::SHUF128: return "X86ISD::SHUF128";
20714 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20715 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20716 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20717 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20718 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20719 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20720 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20721 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20722 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20723 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20724 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20725 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20726 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20727 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20728 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
20729 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20730 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
20731 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20732 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20733 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20734 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20735 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20736 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20737 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
20738 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
20739 case X86ISD::VRANGE: return "X86ISD::VRANGE";
20740 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20741 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20742 case X86ISD::PSADBW: return "X86ISD::PSADBW";
20743 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
20744 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20745 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20746 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20747 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20748 case X86ISD::MFENCE: return "X86ISD::MFENCE";
20749 case X86ISD::SFENCE: return "X86ISD::SFENCE";
20750 case X86ISD::LFENCE: return "X86ISD::LFENCE";
20751 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20752 case X86ISD::SAHF: return "X86ISD::SAHF";
20753 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20754 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20755 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
20756 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
20757 case X86ISD::VPROT: return "X86ISD::VPROT";
20758 case X86ISD::VPROTI: return "X86ISD::VPROTI";
20759 case X86ISD::VPSHA: return "X86ISD::VPSHA";
20760 case X86ISD::VPSHL: return "X86ISD::VPSHL";
20761 case X86ISD::VPCOM: return "X86ISD::VPCOM";
20762 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
20763 case X86ISD::FMADD: return "X86ISD::FMADD";
20764 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20765 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20766 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20767 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20768 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20769 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
20770 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
20771 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
20772 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
20773 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
20774 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
20775 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
20776 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
20777 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
20778 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20779 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20780 case X86ISD::XTEST: return "X86ISD::XTEST";
20781 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20782 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20783 case X86ISD::SELECT: return "X86ISD::SELECT";
20784 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20785 case X86ISD::RCP28: return "X86ISD::RCP28";
20786 case X86ISD::EXP2: return "X86ISD::EXP2";
20787 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20788 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
20789 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
20790 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
20791 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
20792 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
20793 case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
20794 case X86ISD::SCALEF: return "X86ISD::SCALEF";
20795 case X86ISD::ADDS: return "X86ISD::ADDS";
20796 case X86ISD::SUBS: return "X86ISD::SUBS";
20797 case X86ISD::AVG: return "X86ISD::AVG";
20798 case X86ISD::MULHRS: return "X86ISD::MULHRS";
20799 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
20800 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
20801 case X86ISD::FP_TO_SINT_RND: return "X86ISD::FP_TO_SINT_RND";
20802 case X86ISD::FP_TO_UINT_RND: return "X86ISD::FP_TO_UINT_RND";
20803 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
20804 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
20805 }
20806 return nullptr;
20807 }
20809 // isLegalAddressingMode - Return true if the addressing mode represented
20810 // by AM is legal for this target, for a load/store of the specified type.
20811 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
20812 const AddrMode &AM, Type *Ty,
20813 unsigned AS) const {
20814 // X86 supports extremely general addressing modes.
20815 CodeModel::Model M = getTargetMachine().getCodeModel();
20816 Reloc::Model R = getTargetMachine().getRelocationModel();
20818 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20819 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20820 return false;
20822 if (AM.BaseGV) {
20823 unsigned GVFlags =
20824 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20826 // If a reference to this global requires an extra load, we can't fold it.
20827 if (isGlobalStubReference(GVFlags))
20828 return false;
20830 // If BaseGV requires a register for the PIC base, we cannot also have a
20831 // BaseReg specified.
20832 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20833 return false;
20835 // If lower 4G is not available, then we must use rip-relative addressing.
20836 if ((M != CodeModel::Small || R != Reloc::Static) &&
20837 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20838 return false;
20839 }
20841 switch (AM.Scale) {
20842 case 0:
20843 case 1:
20844 case 2:
20845 case 4:
20846 case 8:
20847 // These scales always work.
20848 break;
20849 case 3:
20850 case 5:
20851 case 9:
20852 // These scales are formed with basereg+scalereg. Only accept if there is
20853 // no basereg yet.
20854 if (AM.HasBaseReg)
20855 return false;
20856 break;
20857 default: // Other stuff never works.
20858 return false;
20859 }
20861 return true;
20862 }
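// Note that scales 3, 5, and 9 are only encodable as base + 2/4/8 * index
// with the base and index registers equal (e.g. lea (%rax,%rax,2), %rcx for
// scale 3), which is why isLegalAddressingMode rejects them above whenever a
// base register is already in use.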
20864 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20865 unsigned Bits = Ty->getScalarSizeInBits();
20867 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20868 // particularly cheaper than those without.
20869 if (Bits == 8)
20870 return false;
20872 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20873 // variable shifts just as cheap as scalar ones.
20874 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20875 return true;
20877 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20878 // fully general vector.
20879 return true;
20880 }
20882 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20883 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20884 return false;
20885 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20886 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20887 return NumBits1 > NumBits2;
20888 }
20890 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20891 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20892 return false;
20894 if (!isTypeLegal(EVT::getEVT(Ty1)))
20895 return false;
20897 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20899 // Assuming the caller doesn't have a zeroext or signext return parameter,
20900 // truncation all the way down to i1 is valid.
20901 return true;
20902 }
20904 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20905 return isInt<32>(Imm);
20906 }
20908 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20909 // Can also use sub to handle negated immediates.
20910 return isInt<32>(Imm);
20911 }
20913 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20914 if (!VT1.isInteger() || !VT2.isInteger())
20915 return false;
20916 unsigned NumBits1 = VT1.getSizeInBits();
20917 unsigned NumBits2 = VT2.getSizeInBits();
20918 return NumBits1 > NumBits2;
20919 }
20921 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20922 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20923 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20924 }
20926 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20927 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20928 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20929 }
20931 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20932 EVT VT1 = Val.getValueType();
20933 if (isZExtFree(VT1, VT2))
20934 return true;
20936 if (Val.getOpcode() != ISD::LOAD)
20937 return false;
20939 if (!VT1.isSimple() || !VT1.isInteger() ||
20940 !VT2.isSimple() || !VT2.isInteger())
20941 return false;
20943 switch (VT1.getSimpleVT().SimpleTy) {
20944 default: break;
20945 case MVT::i8:
20946 case MVT::i16:
20947 case MVT::i32:
20948 // X86 has 8, 16, and 32-bit zero-extending loads.
20949 return true;
20950 }
20952 return false;
20953 }
20955 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20957 bool
20958 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20959 if (!Subtarget->hasAnyFMA())
20960 return false;
20962 VT = VT.getScalarType();
20964 if (!VT.isSimple())
20965 return false;
20967 switch (VT.getSimpleVT().SimpleTy) {
20968 case MVT::f32:
20969 case MVT::f64:
20970 return true;
20971 default:
20972 break;
20973 }
20975 return false;
20976 }
20978 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20979 // i16 instructions are longer (0x66 prefix) and potentially slower.
20980 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20981 }
20983 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20984 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20985 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20986 /// are assumed to be legal.
20988 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20989 EVT VT) const {
20990 if (!VT.isSimple())
20991 return false;
20993 // Not for i1 vectors
20994 if (VT.getSimpleVT().getScalarType() == MVT::i1)
20995 return false;
20997 // Very little shuffling can be done for 64-bit vectors right now.
20998 if (VT.getSimpleVT().getSizeInBits() == 64)
20999 return false;
21001 // We only care that the types being shuffled are legal. The lowering can
21002 // handle any possible shuffle mask that results.
21003 return isTypeLegal(VT.getSimpleVT());
21004 }
21006 bool
21007 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
21008 EVT VT) const {
21009 // Just delegate to the generic legality, clear masks aren't special.
21010 return isShuffleMaskLegal(Mask, VT);
21011 }
21013 //===----------------------------------------------------------------------===//
21014 // X86 Scheduler Hooks
21015 //===----------------------------------------------------------------------===//
21017 /// Utility function to emit xbegin specifying the start of an RTM region.
21018 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
21019 const TargetInstrInfo *TII) {
21020 DebugLoc DL = MI->getDebugLoc();
21022 const BasicBlock *BB = MBB->getBasicBlock();
21023 MachineFunction::iterator I = ++MBB->getIterator();
21025 // For the v = xbegin(), we generate
21026 //
21027 // thisMBB:
21028 //  xbegin sinkMBB
21029 //
21030 // mainMBB:
21031 //  eax = -1
21032 //
21033 // sinkMBB:
21034 //  v = eax
21036 MachineBasicBlock *thisMBB = MBB;
21037 MachineFunction *MF = MBB->getParent();
21038 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21039 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21040 MF->insert(I, mainMBB);
21041 MF->insert(I, sinkMBB);
21043 // Transfer the remainder of BB and its successor edges to sinkMBB.
21044 sinkMBB->splice(sinkMBB->begin(), MBB,
21045 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21046 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21048 // thisMBB:
21049 //  xbegin sinkMBB
21050 // # fallthrough to mainMBB
21051 // # abortion to sinkMBB
21052 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
21053 thisMBB->addSuccessor(mainMBB);
21054 thisMBB->addSuccessor(sinkMBB);
21056 // mainMBB:
21057 //  eax = -1
21058 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
21059 mainMBB->addSuccessor(sinkMBB);
21061 // sinkMBB:
21062 // EAX is live into the sinkMBB
21063 sinkMBB->addLiveIn(X86::EAX);
21064 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21065 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
21066 .addReg(X86::EAX);
21068 MI->eraseFromParent();
21069 return sinkMBB;
21070 }
21072 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
21073 // or XMM0_V32I8 in AVX all of this code can be replaced with that
21074 // in the .td file.
21075 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
21076 const TargetInstrInfo *TII) {
21077 unsigned Opc;
21078 switch (MI->getOpcode()) {
21079 default: llvm_unreachable("illegal opcode!");
21080 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
21081 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
21082 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
21083 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
21084 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
21085 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
21086 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
21087 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
21088 }
21090 DebugLoc dl = MI->getDebugLoc();
21091 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
21093 unsigned NumArgs = MI->getNumOperands();
21094 for (unsigned i = 1; i < NumArgs; ++i) {
21095 MachineOperand &Op = MI->getOperand(i);
21096 if (!(Op.isReg() && Op.isImplicit()))
21097 MIB.addOperand(Op);
21098 }
21099 if (MI->hasOneMemOperand())
21100 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
21102 BuildMI(*BB, MI, dl,
21103 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
21104 .addReg(X86::XMM0);
21106 MI->eraseFromParent();
21107 return BB;
21108 }
21110 // FIXME: Custom handling because TableGen doesn't support multiple implicit
21111 // defs in an instruction pattern
21112 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
21113 const TargetInstrInfo *TII) {
21114 unsigned Opc;
21115 switch (MI->getOpcode()) {
21116 default: llvm_unreachable("illegal opcode!");
21117 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
21118 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
21119 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
21120 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
21121 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
21122 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
21123 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
21124 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
21125 }
21127 DebugLoc dl = MI->getDebugLoc();
21128 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
21130 unsigned NumArgs = MI->getNumOperands(); // remove the results
21131 for (unsigned i = 1; i < NumArgs; ++i) {
21132 MachineOperand &Op = MI->getOperand(i);
21133 if (!(Op.isReg() && Op.isImplicit()))
21134 MIB.addOperand(Op);
21135 }
21136 if (MI->hasOneMemOperand())
21137 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
21139 BuildMI(*BB, MI, dl,
21140 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
21141 .addReg(X86::ECX);
21143 MI->eraseFromParent();
21144 return BB;
21145 }
21147 static MachineBasicBlock *EmitWRPKRU(MachineInstr *MI, MachineBasicBlock *BB,
21148 const X86Subtarget *Subtarget) {
21149 DebugLoc dl = MI->getDebugLoc();
21150 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21152 // insert input VAL into EAX
21153 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
21154 .addReg(MI->getOperand(0).getReg());
21155 // insert zero to ECX
21156 BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
21157 .addReg(X86::ECX)
21158 .addReg(X86::ECX);
21159 // insert zero to EDX
21160 BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::EDX)
21161 .addReg(X86::EDX)
21162 .addReg(X86::EDX);
21163 // insert WRPKRU instruction
21164 BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
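// For reference: WRPKRU writes the PKRU value held in EAX and requires ECX
// and EDX to be zero, while RDPKRU requires ECX = 0 and returns PKRU in EAX;
// hence the explicit zeroing in EmitWRPKRU above and EmitRDPKRU below.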
21170 static MachineBasicBlock *EmitRDPKRU(MachineInstr *MI, MachineBasicBlock *BB,
21171 const X86Subtarget *Subtarget) {
21172 DebugLoc dl = MI->getDebugLoc();
21173 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  // insert zero into ECX
  BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
      .addReg(X86::ECX)
      .addReg(X86::ECX);
21179 // insert RDPKRU instruction
21180 BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
      .addReg(X86::EAX);
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
21188 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
21189 const X86Subtarget *Subtarget) {
21190 DebugLoc dl = MI->getDebugLoc();
21191 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21192 // Address into RAX/EAX, other two args into ECX, EDX.
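  // (MONITOR reads its operands implicitly from those physical registers;
  // the LEA materializes the pseudo's address operand and the COPYs move the
  // two hint operands into ECX and EDX.)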
21193 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
21194 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
21195 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
21196 for (int i = 0; i < X86::AddrNumOperands; ++i)
21197 MIB.addOperand(MI->getOperand(i));
21199 unsigned ValOps = X86::AddrNumOperands;
21200 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
21201 .addReg(MI->getOperand(ValOps).getReg());
21202 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
21203 .addReg(MI->getOperand(ValOps+1).getReg());
21205 // The instruction doesn't actually take any operands though.
21206 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
21212 MachineBasicBlock *
21213 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
21214 MachineBasicBlock *MBB) const {
21215 // Emit va_arg instruction on X86-64.
21217 // Operands to this pseudo-instruction:
21218 // 0 ) Output : destination address (reg)
21219 // 1-5) Input : va_list address (addr, i64mem)
21220 // 6 ) ArgSize : Size (in bytes) of vararg type
21221 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
21222 // 8 ) Align : Alignment of type
21223 // 9 ) EFLAGS (implicit-def)
21225 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
21226 static_assert(X86::AddrNumOperands == 5,
21227 "VAARG_64 assumes 5 address operands");
21229 unsigned DestReg = MI->getOperand(0).getReg();
21230 MachineOperand &Base = MI->getOperand(1);
21231 MachineOperand &Scale = MI->getOperand(2);
21232 MachineOperand &Index = MI->getOperand(3);
21233 MachineOperand &Disp = MI->getOperand(4);
21234 MachineOperand &Segment = MI->getOperand(5);
21235 unsigned ArgSize = MI->getOperand(6).getImm();
21236 unsigned ArgMode = MI->getOperand(7).getImm();
21237 unsigned Align = MI->getOperand(8).getImm();
21239 // Memory Reference
21240 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
21241 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21242 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21244 // Machine Information
21245 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21246 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
21247 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
21248 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
21249 DebugLoc DL = MI->getDebugLoc();
  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
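  // For reference, this matches (roughly) the C-level declaration used by
  // the SysV x86-64 ABI:
  //
  //   typedef struct {
  //     unsigned int gp_offset;
  //     unsigned int fp_offset;
  //     void *overflow_arg_area;
  //     void *reg_save_area;
  //   } va_list[1];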
21260 unsigned TotalNumIntRegs = 6;
21261 unsigned TotalNumXMMRegs = 8;
21262 bool UseGPOffset = (ArgMode == 1);
21263 bool UseFPOffset = (ArgMode == 2);
21264 unsigned MaxOffset = TotalNumIntRegs * 8 +
21265 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
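  // gp_offset therefore ranges over [0, 48) and fp_offset over [48, 176):
  // six 8-byte GP register slots followed by eight 16-byte XMM slots.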
21267 /* Align ArgSize to a multiple of 8 */
21268 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
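  // For example, ArgSize == 12 rounds up to ArgSizeA8 == 16; sizes that are
  // already multiples of 8 are unchanged.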
21269 bool NeedsAlign = (Align > 8);
21271 MachineBasicBlock *thisMBB = MBB;
21272 MachineBasicBlock *overflowMBB;
21273 MachineBasicBlock *offsetMBB;
21274 MachineBasicBlock *endMBB;
21276 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
21277 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
21278 unsigned OffsetReg = 0;
21280 if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't need to alter
    // control flow; no branch is created.
21283 OffsetDestReg = 0; // unused
21284 OverflowDestReg = DestReg;
21286 offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
21290 // First emit code to check if gp_offset (or fp_offset) is below the bound.
21291 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
21292 // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //         |     .
    //         |        .
    //     offsetMBB   overflowMBB
    //         |        .
    //         |     .
    //        endMBB
21302 // Registers for the PHI in endMBB
21303 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
21304 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
21306 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21307 MachineFunction *MF = MBB->getParent();
21308 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21309 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21310 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21312 MachineFunction::iterator MBBIter = ++MBB->getIterator();
21314 // Insert the new basic blocks
21315 MF->insert(MBBIter, offsetMBB);
21316 MF->insert(MBBIter, overflowMBB);
21317 MF->insert(MBBIter, endMBB);
21319 // Transfer the remainder of MBB and its successor edges to endMBB.
21320 endMBB->splice(endMBB->begin(), thisMBB,
21321 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
21322 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
21324 // Make offsetMBB and overflowMBB successors of thisMBB
21325 thisMBB->addSuccessor(offsetMBB);
21326 thisMBB->addSuccessor(overflowMBB);
21328 // endMBB is a successor of both offsetMBB and overflowMBB
21329 offsetMBB->addSuccessor(endMBB);
21330 overflowMBB->addSuccessor(endMBB);
21332 // Load the offset value into a register
21333 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
        .addOperand(Base)
        .addOperand(Scale)
        .addOperand(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
21339 .addOperand(Segment)
21340 .setMemRefs(MMOBegin, MMOEnd);
21342 // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
        .addReg(OffsetReg)
        .addImm(MaxOffset + 8 - ArgSizeA8);
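    // The argument still fits in the register save area iff
    // OffsetReg + ArgSizeA8 <= MaxOffset. Since all three quantities are
    // multiples of 8, that is the same test as
    // OffsetReg < MaxOffset + 8 - ArgSizeA8.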
21347 // Branch to "overflowMBB" if offset >= max
21348 // Fall through to "offsetMBB" otherwise
21349 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
        .addMBB(overflowMBB);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);
21357 // Read the reg_save_area address.
21358 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
        .addOperand(Base)
        .addOperand(Scale)
        .addOperand(Index)
        .addDisp(Disp, 16)
        .addOperand(Segment)
21365 .setMemRefs(MMOBegin, MMOEnd);
21367 // Zero-extend the offset
21368 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
        .addImm(0)
        .addReg(OffsetReg)
        .addImm(X86::sub_32bit);
21374 // Add the offset to the reg_save_area to get the final address.
21375 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21376 .addReg(OffsetReg64)
21377 .addReg(RegSaveReg);
21379 // Compute the offset for the next argument
21380 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
        .addReg(OffsetReg)
        .addImm(UseFPOffset ? 16 : 8);
21385 // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
        .addOperand(Base)
        .addOperand(Scale)
        .addOperand(Index)
21390 .addDisp(Disp, UseFPOffset ? 4 : 0)
21391 .addOperand(Segment)
21392 .addReg(NextOffsetReg)
21393 .setMemRefs(MMOBegin, MMOEnd);
    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
        .addMBB(endMBB);
  }

21401 // Emit code to use overflow area
21404 // Load the overflow_area address into a register.
21405 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 8)
21411 .addOperand(Segment)
21412 .setMemRefs(MMOBegin, MMOEnd);
21414 // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
21418 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21419 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21421 // aligned_addr = (addr + (align-1)) & ~(align-1)
21422 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
        .addReg(OverflowAddrReg)
        .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
        .addReg(TmpReg)
21428 .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
        .addReg(OverflowAddrReg);
  }
21434 // Compute the next overflow address after this argument.
21435 // (the overflow address should be kept 8-byte aligned)
21436 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21437 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21438 .addReg(OverflowDestReg)
21439 .addImm(ArgSizeA8);
21441 // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 8)
21447 .addOperand(Segment)
21448 .addReg(NextAddrReg)
21449 .setMemRefs(MMOBegin, MMOEnd);
  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
        .addReg(OffsetDestReg).addMBB(offsetMBB)
        .addReg(OverflowDestReg).addMBB(overflowMBB);
  }
21459 // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}
21465 MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                               MachineInstr *MI,
                                               MachineBasicBlock *MBB) const {
21469 // Emit code to save XMM registers to the stack. The ABI says that the
21470 // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all
21473 // of the stores if %al is non-zero. It's less code, and it's probably
21474 // easier on the hardware branch predictor, and stores aren't all that
21475 // expensive anyway.
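  // (Per the SysV x86-64 ABI, the caller sets %al to an upper bound on the
  // number of vector registers actually used by the varargs call.)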
21477 // Create the new basic blocks. One block contains all the XMM stores,
21478 // and one block is the final destination regardless of whether any
21479 // stores were performed.
21480 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21481 MachineFunction *F = MBB->getParent();
21482 MachineFunction::iterator MBBIter = ++MBB->getIterator();
21483 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21484 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21485 F->insert(MBBIter, XMMSaveMBB);
21486 F->insert(MBBIter, EndMBB);
21488 // Transfer the remainder of MBB and its successor edges to EndMBB.
21489 EndMBB->splice(EndMBB->begin(), MBB,
21490 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21491 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21493 // The original block will now fall through to the XMM save block.
21494 MBB->addSuccessor(XMMSaveMBB);
21495 // The XMMSaveMBB will fall through to the end block.
21496 XMMSaveMBB->addSuccessor(EndMBB);
21498 // Now add the instructions.
21499 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21500 DebugLoc DL = MI->getDebugLoc();
21502 unsigned CountReg = MI->getOperand(0).getReg();
21503 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21504 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21506 if (!Subtarget->isCallingConvWin64(F->getFunction()->getCallingConv())) {
21507 // If %al is 0, branch around the XMM save block.
21508 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21509 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }
21513 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21514 // that was just emitted, but clearly shouldn't be "saved".
21515 assert((MI->getNumOperands() <= 3 ||
21516 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21517 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21518 && "Expected last argument to be EFLAGS");
21519 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21520 // In the XMM save block, save all the XMM argument registers.
21521 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21522 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21523 MachineMemOperand *MMO = F->getMachineMemOperand(
21524 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
21525 MachineMemOperand::MOStore,
21526 /*Size=*/16, /*Align=*/16);
21527 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21528 .addFrameIndex(RegSaveFrameIndex)
21529 .addImm(/*Scale=*/1)
21530 .addReg(/*IndexReg=*/0)
21531 .addImm(/*Disp=*/Offset)
21532 .addReg(/*Segment=*/0)
21533 .addReg(MI->getOperand(i).getReg())
21534 .addMemOperand(MMO);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}
21542 // The EFLAGS operand of SelectItr might be missing a kill marker
21543 // because there were multiple uses of EFLAGS, and ISel didn't know
21544 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
21547 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21548 MachineBasicBlock* BB,
21549 const TargetRegisterInfo* TRI) {
21550 // Scan forward through BB for a use/def of EFLAGS.
21551 MachineBasicBlock::iterator miI(std::next(SelectItr));
21552 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21553 const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
21556 if (mi.definesRegister(X86::EFLAGS))
21557 break; // Should have kill-flag - update below.
  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
21562 if (miI == BB->end()) {
21563 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21564 sEnd = BB->succ_end();
21565 sItr != sEnd; ++sItr) {
21566 MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

21572 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21573 // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}
21578 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
21579 // together with other CMOV pseudo-opcodes into a single basic-block with
21580 // conditional jump around it.
21581 static bool isCMOVPseudo(MachineInstr *MI) {
21582 switch (MI->getOpcode()) {
21583 case X86::CMOV_FR32:
21584 case X86::CMOV_FR64:
21585 case X86::CMOV_GR8:
21586 case X86::CMOV_GR16:
21587 case X86::CMOV_GR32:
21588 case X86::CMOV_RFP32:
21589 case X86::CMOV_RFP64:
21590 case X86::CMOV_RFP80:
21591 case X86::CMOV_V2F64:
21592 case X86::CMOV_V2I64:
21593 case X86::CMOV_V4F32:
21594 case X86::CMOV_V4F64:
21595 case X86::CMOV_V4I64:
21596 case X86::CMOV_V16F32:
21597 case X86::CMOV_V8F32:
21598 case X86::CMOV_V8F64:
21599 case X86::CMOV_V8I64:
21600 case X86::CMOV_V8I1:
21601 case X86::CMOV_V16I1:
21602 case X86::CMOV_V32I1:
  case X86::CMOV_V64I1:
    return true;

  default:
    return false;
  }
}
21611 MachineBasicBlock *
21612 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21613 MachineBasicBlock *BB) const {
21614 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21615 DebugLoc DL = MI->getDebugLoc();
21617 // To "insert" a SELECT_CC instruction, we actually have to insert the
21618 // diamond control-flow pattern. The incoming instruction knows the
21619 // destination vreg to set, the condition code register to branch on, the
21620 // true/false values to select between, and a branch opcode to use.
21621 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21622 MachineFunction::iterator It = ++BB->getIterator();
  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
21630 MachineBasicBlock *thisMBB = BB;
21631 MachineFunction *F = BB->getParent();
21633 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
21634 // as described above, by inserting a BB, and then making a PHI at the join
21635 // point to select the true and false operands of the CMOV in the PHI.
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
21641 // In this case, there are multiple CMOVs in a row, all which are based on
21642 // the same condition setting (or the exact opposite condition setting).
21643 // In this case we can lower all the CMOVs using a single inserted BB, and
21644 // then make a number of PHIs at the join point to model the CMOVs. The only
21645 // trickiness here, is that in a case like:
21647 // t2 = CMOV cond1 t1, f1
21648 // t3 = CMOV cond1 t2, f2
21650 // when rewriting this into PHIs, we have to perform some renaming on the
21651 // temps since you cannot have a PHI operand refer to a PHI result earlier
21652 // in the same block. The "simple" but wrong lowering would be:
21654 // t2 = PHI t1(BB1), f1(BB2)
21655 // t3 = PHI t2(BB1), f2(BB2)
21657 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
21658 // renaming is to note that on the path through BB1, t2 is really just a
21659 // copy of t1, and do that renaming, properly generating:
21661 // t2 = PHI t1(BB1), f1(BB2)
21662 // t3 = PHI t1(BB1), f2(BB2)
21664 // Case 2, we lower cascaded CMOVs such as
21666 // (CMOV (CMOV F, T, cc1), T, cc2)
  // to two successive branches. For that, we look for another CMOV as the
21669 // following instruction.
21671 // Without this, we would add a PHI between the two jumps, which ends up
21672 // creating a few copies all around. For instance, for
21674 // (sitofp (zext (fcmp une)))
21676 // we would generate:
21678 // ucomiss %xmm1, %xmm0
21679 // movss <1.0f>, %xmm0
  //   movaps  %xmm0, %xmm1
  //   jne     .LBB5_2
  //   xorps   %xmm1, %xmm1
  // .LBB5_2:
  //   jp      .LBB5_4
  //   movaps  %xmm1, %xmm0
  // .LBB5_4:
  //   retq
  //
21689 // because this custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  //   A: X = ...; Y = ...
  //   B: empty
  //   C: Z = PHI [X, A], [Y, B]
  //   D: empty
  //   E: PHI [X, C], [Z, D]
21707 // If we lower both CMOVs in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  //   A: X = ...; Y = ...
  //   D: empty
  //   E: PHI [X, A], [X, C], [Y, D]
21723 // Which, in our sitofp/fcmp example, gives us something like:
21725 // ucomiss %xmm1, %xmm0
  //   movss  <1.0f>, %xmm0
  //   jne     .LBB5_4
  //   jp      .LBB5_4
  //   xorps   %xmm0, %xmm0
  // .LBB5_4:
  //   retq
  //
21733 MachineInstr *CascadedCMOV = nullptr;
21734 MachineInstr *LastCMOV = MI;
21735 X86::CondCode CC = X86::CondCode(MI->getOperand(3).getImm());
21736 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
21737 MachineBasicBlock::iterator NextMIIt =
21738 std::next(MachineBasicBlock::iterator(MI));
21740 // Check for case 1, where there are multiple CMOVs with the same condition
21741 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
21742 // number of jumps the most.
21744 if (isCMOVPseudo(MI)) {
21745 // See if we have a string of CMOVS with the same condition.
21746 while (NextMIIt != BB->end() &&
21747 isCMOVPseudo(NextMIIt) &&
21748 (NextMIIt->getOperand(3).getImm() == CC ||
21749 NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
    }
  }
21755 // This checks for case 2, but only do this if we didn't already find
21756 // case 1, as indicated by LastCMOV == MI.
21757 if (LastCMOV == MI &&
21758 NextMIIt != BB->end() && NextMIIt->getOpcode() == MI->getOpcode() &&
21759 NextMIIt->getOperand(2).getReg() == MI->getOperand(2).getReg() &&
21760 NextMIIt->getOperand(1).getReg() == MI->getOperand(0).getReg()) {
    CascadedCMOV = &*NextMIIt;
  }
21764 MachineBasicBlock *jcc1MBB = nullptr;
21766 // If we have a cascaded CMOV, we lower it to two successive branches to
21767 // the same block. EFLAGS is used by both, so mark it as live in the second.
21768 if (CascadedCMOV) {
21769 jcc1MBB = F->CreateMachineBasicBlock(LLVM_BB);
21770 F->insert(It, jcc1MBB);
    jcc1MBB->addLiveIn(X86::EFLAGS);
  }
21774 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21775 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21776 F->insert(It, copy0MBB);
21777 F->insert(It, sinkMBB);
21779 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21780 // live into the sink and copy blocks.
21781 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21783 MachineInstr *LastEFLAGSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
21784 if (!LastEFLAGSUser->killsRegister(X86::EFLAGS) &&
21785 !checkAndUpdateEFLAGSKill(LastEFLAGSUser, BB, TRI)) {
21786 copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }
21790 // Transfer the remainder of BB and its successor edges to sinkMBB.
21791 sinkMBB->splice(sinkMBB->begin(), BB,
21792 std::next(MachineBasicBlock::iterator(LastCMOV)), BB->end());
21793 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21795 // Add the true and fallthrough blocks as its successors.
21796 if (CascadedCMOV) {
21797 // The fallthrough block may be jcc1MBB, if we have a cascaded CMOV.
21798 BB->addSuccessor(jcc1MBB);
21800 // In that case, jcc1MBB will itself fallthrough the copy0MBB, and
21801 // jump to the sinkMBB.
21802 jcc1MBB->addSuccessor(copy0MBB);
    jcc1MBB->addSuccessor(sinkMBB);
  } else {
    BB->addSuccessor(copy0MBB);
  }
21808 // The true block target of the first (or only) branch is always sinkMBB.
21809 BB->addSuccessor(sinkMBB);
21811 // Create the conditional branch instruction.
21812 unsigned Opc = X86::GetCondBranchFromCond(CC);
21813 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21815 if (CascadedCMOV) {
21816 unsigned Opc2 = X86::GetCondBranchFromCond(
21817 (X86::CondCode)CascadedCMOV->getOperand(3).getImm());
    BuildMI(jcc1MBB, DL, TII->get(Opc2)).addMBB(sinkMBB);
  }

  //  copy0MBB:
  //   %FalseValue = ...
21823 // # fallthrough to sinkMBB
21824 copy0MBB->addSuccessor(sinkMBB);
  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
21829 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
21830 MachineBasicBlock::iterator MIItEnd =
21831 std::next(MachineBasicBlock::iterator(LastCMOV));
21832 MachineBasicBlock::iterator SinkInsertionPoint = sinkMBB->begin();
21833 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
21834 MachineInstrBuilder MIB;
21836 // As we are creating the PHIs, we have to be careful if there is more than
21837 // one. Later CMOVs may reference the results of earlier CMOVs, but later
21838 // PHIs have to reference the individual true/false inputs from earlier PHIs.
21839 // That also means that PHI construction must work forward from earlier to
21840 // later, and that the code must maintain a mapping from earlier PHI's
21841 // destination registers, and the registers that went into the PHI.
21843 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
21844 unsigned DestReg = MIIt->getOperand(0).getReg();
21845 unsigned Op1Reg = MIIt->getOperand(1).getReg();
21846 unsigned Op2Reg = MIIt->getOperand(2).getReg();
21848 // If this CMOV we are generating is the opposite condition from
21849 // the jump we generated, then we have to swap the operands for the
21850 // PHI that is going to be generated.
21851 if (MIIt->getOperand(3).getImm() == OppCC)
21852 std::swap(Op1Reg, Op2Reg);
21854 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
21855 Op1Reg = RegRewriteTable[Op1Reg].first;
21857 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
21858 Op2Reg = RegRewriteTable[Op2Reg].second;
21860 MIB = BuildMI(*sinkMBB, SinkInsertionPoint, DL,
21861 TII->get(X86::PHI), DestReg)
21862 .addReg(Op1Reg).addMBB(copy0MBB)
21863 .addReg(Op2Reg).addMBB(thisMBB);
21865 // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }
21869 // If we have a cascaded CMOV, the second Jcc provides the same incoming
21870 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
21871 if (CascadedCMOV) {
21872 MIB.addReg(MI->getOperand(2).getReg()).addMBB(jcc1MBB);
21873 // Copy the PHI result to the register defined by the second CMOV.
21874 BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
21875 DL, TII->get(TargetOpcode::COPY),
21876 CascadedCMOV->getOperand(0).getReg())
21877 .addReg(MI->getOperand(0).getReg());
    CascadedCMOV->eraseFromParent();
  }
21881 // Now remove the CMOV(s).
21882 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; )
    (MIIt++)->eraseFromParent();

  return sinkMBB;
}
21888 MachineBasicBlock *
21889 X86TargetLowering::EmitLoweredAtomicFP(MachineInstr *MI,
21890 MachineBasicBlock *BB) const {
21891 // Combine the following atomic floating-point modification pattern:
21892 // a.store(reg OP a.load(acquire), release)
21893 // Transform them into:
21894 // OPss (%gpr), %xmm
21895 // movss %xmm, (%gpr)
21896 // Or sd equivalent for 64-bit operations.
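  // No fences are required here: under the x86 TSO memory model, ordinary
  // aligned loads already provide acquire ordering and ordinary aligned
  // stores already provide release ordering.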
  unsigned MOp, FOp;
  switch (MI->getOpcode()) {
21899 default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
21900 case X86::RELEASE_FADD32mr: MOp = X86::MOVSSmr; FOp = X86::ADDSSrm; break;
  case X86::RELEASE_FADD64mr: MOp = X86::MOVSDmr; FOp = X86::ADDSDrm; break;
  }
21903 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21904 DebugLoc DL = MI->getDebugLoc();
21905 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
21906 MachineOperand MSrc = MI->getOperand(0);
21907 unsigned VSrc = MI->getOperand(5).getReg();
21908 const MachineOperand &Disp = MI->getOperand(3);
21909 MachineOperand ZeroDisp = MachineOperand::CreateImm(0);
21910 bool hasDisp = Disp.isGlobal() || Disp.isImm();
21911 if (hasDisp && MSrc.isReg())
21912 MSrc.setIsKill(false);
21913 MachineInstrBuilder MIM = BuildMI(*BB, MI, DL, TII->get(MOp))
21914 .addOperand(/*Base=*/MSrc)
21915 .addImm(/*Scale=*/1)
21916 .addReg(/*Index=*/0)
                               .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
                               .addReg(/*Segment=*/0);
21919 MachineInstr *MIO = BuildMI(*BB, (MachineInstr *)MIM, DL, TII->get(FOp),
                              MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
                          .addReg(VSrc)
                          .addOperand(/*Base=*/MSrc)
21923 .addImm(/*Scale=*/1)
21924 .addReg(/*Index=*/0)
21925 .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
21926 .addReg(/*Segment=*/0);
21927 MIM.addReg(MIO->getOperand(0).getReg(), RegState::Kill);
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
21932 MachineBasicBlock *
21933 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21934 MachineBasicBlock *BB) const {
21935 MachineFunction *MF = BB->getParent();
21936 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21937 DebugLoc DL = MI->getDebugLoc();
21938 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21940 assert(MF->shouldSplitStack());
21942 const bool Is64Bit = Subtarget->is64Bit();
21943 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21945 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21946 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
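  // These TLS offsets locate the per-thread stack-limit word maintained by
  // the segmented-stack runtime (the same slots gcc's -fsplit-stack uses).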
  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
21964 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21965 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21966 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21968 MachineRegisterInfo &MRI = MF->getRegInfo();
21969 const TargetRegisterClass *AddrRegClass =
21970 getRegClassFor(getPointerTy(MF->getDataLayout()));
21972 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21973 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21974 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21975 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21976 sizeVReg = MI->getOperand(1).getReg(),
21977 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21979 MachineFunction::iterator MBBIter = ++BB->getIterator();
21981 MF->insert(MBBIter, bumpMBB);
21982 MF->insert(MBBIter, mallocMBB);
21983 MF->insert(MBBIter, continueMBB);
21985 continueMBB->splice(continueMBB->begin(), BB,
21986 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21987 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21989 // Add code to the main basic block to check if the stack limit has been hit,
21990 // and if so, jump to mallocMBB otherwise to bumpMBB.
21991 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21992 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21993 .addReg(tmpSPVReg).addReg(sizeVReg);
21994 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21995 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21996 .addReg(SPLimitVReg);
21997 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21999 // bumpMBB simply decreases the stack pointer, since we know the current
22000 // stacklet has enough space.
22001 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
22002 .addReg(SPLimitVReg);
22003 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
22004 .addReg(SPLimitVReg);
22005 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
22007 // Calls into a routine in libgcc to allocate more space from the heap.
22008 const uint32_t *RegMask =
22009 Subtarget->getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
        .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
22014 .addExternalSymbol("__morestack_allocate_stack_space")
22015 .addRegMask(RegMask)
22016 .addReg(X86::RDI, RegState::Implicit)
22017 .addReg(X86::RAX, RegState::ImplicitDefine);
22018 } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
        .addReg(sizeVReg);
22021 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
22022 .addExternalSymbol("__morestack_allocate_stack_space")
22023 .addRegMask(RegMask)
22024 .addReg(X86::EDI, RegState::Implicit)
22025 .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
        .addImm(16);
22029 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
22030 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
22031 .addExternalSymbol("__morestack_allocate_stack_space")
22032 .addRegMask(RegMask)
        .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
        .addImm(16);
22040 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
22041 .addReg(IsLP64 ? X86::RAX : X86::EAX);
22042 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
22044 // Set up the CFG correctly.
22045 BB->addSuccessor(bumpMBB);
22046 BB->addSuccessor(mallocMBB);
22047 mallocMBB->addSuccessor(continueMBB);
22048 bumpMBB->addSuccessor(continueMBB);
22050 // Take care of the PHI nodes.
22051 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
22052 MI->getOperand(0).getReg())
22053 .addReg(mallocPtrVReg).addMBB(mallocMBB)
22054 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
22056 // Delete the original pseudo instruction.
22057 MI->eraseFromParent();
  return continueMBB;
}
22063 MachineBasicBlock *
22064 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
22065 MachineBasicBlock *BB) const {
22066 assert(!Subtarget->isTargetMachO());
22067 DebugLoc DL = MI->getDebugLoc();
22068 MachineInstr *ResumeMI = Subtarget->getFrameLowering()->emitStackProbe(
22069 *BB->getParent(), *BB, MI, DL, false);
22070 MachineBasicBlock *ResumeBB = ResumeMI->getParent();
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return ResumeBB;
}
22075 MachineBasicBlock *
22076 X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
22077 MachineBasicBlock *BB) const {
22078 MachineFunction *MF = BB->getParent();
22079 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
22080 MachineBasicBlock *TargetMBB = MI->getOperand(0).getMBB();
22081 DebugLoc DL = MI->getDebugLoc();
22083 assert(!isAsynchronousEHPersonality(
22084 classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
22085 "SEH does not use catchret!");
22087 // Only 32-bit EH needs to worry about manually restoring stack pointers.
  if (!Subtarget->is32Bit())
    return BB;
22091 // C++ EH creates a new target block to hold the restore code, and wires up
22092 // the new block to the return destination with a normal JMP_4.
22093 MachineBasicBlock *RestoreMBB =
22094 MF->CreateMachineBasicBlock(BB->getBasicBlock());
22095 assert(BB->succ_size() == 1);
22096 MF->insert(std::next(BB->getIterator()), RestoreMBB);
22097 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
22098 BB->addSuccessor(RestoreMBB);
22099 MI->getOperand(0).setMBB(RestoreMBB);
22101 auto RestoreMBBI = RestoreMBB->begin();
22102 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
  return BB;
}
22107 MachineBasicBlock *
22108 X86TargetLowering::EmitLoweredCatchPad(MachineInstr *MI,
22109 MachineBasicBlock *BB) const {
22110 MachineFunction *MF = BB->getParent();
22111 const Constant *PerFn = MF->getFunction()->getPersonalityFn();
22112 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
22113 // Only 32-bit SEH requires special handling for catchpad.
22114 if (IsSEH && Subtarget->is32Bit()) {
22115 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
22116 DebugLoc DL = MI->getDebugLoc();
    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
  }
  MI->eraseFromParent();
  return BB;
}
22123 MachineBasicBlock *
22124 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
22125 MachineBasicBlock *BB) const {
22126 // This is pretty easy. We're taking the value that we received from
22127 // our load from the relocation, sticking it in either RDI (x86-64)
22128 // or EAX and doing an indirect call. The return value will then
22129 // be in the normal return register.
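  // (On Darwin, a thread-local variable is reached through a descriptor
  // whose first word is a getter function; the load below fetches that
  // pointer and the indirect call invokes it with the descriptor address
  // in RDI/EAX.)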
22130 MachineFunction *F = BB->getParent();
22131 const X86InstrInfo *TII = Subtarget->getInstrInfo();
22132 DebugLoc DL = MI->getDebugLoc();
22134 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
22135 assert(MI->getOperand(3).isGlobal() && "This should be a global");
22137 // Get a register mask for the lowered call.
22138 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
22139 // proper register mask.
22140 const uint32_t *RegMask =
22141 Subtarget->is64Bit() ?
22142 Subtarget->getRegisterInfo()->getDarwinTLSCallPreservedMask() :
22143 Subtarget->getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
22144 if (Subtarget->is64Bit()) {
22145 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
22153 addDirectMem(MIB, X86::RDI);
22154 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
22155 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
22156 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
22164 addDirectMem(MIB, X86::EAX);
22165 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
22168 TII->get(X86::MOV32rm), X86::EAX)
22169 .addReg(TII->getGlobalBaseReg(F))
22170 .addImm(0).addReg(0)
22171 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
22175 addDirectMem(MIB, X86::EAX);
22176 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
22183 MachineBasicBlock *
22184 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
22185 MachineBasicBlock *MBB) const {
22186 DebugLoc DL = MI->getDebugLoc();
22187 MachineFunction *MF = MBB->getParent();
22188 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
22189 MachineRegisterInfo &MRI = MF->getRegInfo();
22191 const BasicBlock *BB = MBB->getBasicBlock();
22192 MachineFunction::iterator I = ++MBB->getIterator();
22194 // Memory Reference
22195 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
22196 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
  unsigned DstReg;
  unsigned MemOpndSlot = 0;
22201 unsigned CurOp = 0;
22203 DstReg = MI->getOperand(CurOp++).getReg();
22204 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
22205 assert(RC->hasType(MVT::i32) && "Invalid destination!");
22206 unsigned mainDstReg = MRI.createVirtualRegister(RC);
22207 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
22209 MemOpndSlot = CurOp;
22211 MVT PVT = getPointerTy(MF->getDataLayout());
22212 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
22213 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
22231 MachineBasicBlock *thisMBB = MBB;
22232 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
22233 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
22234 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
22235 MF->insert(I, mainMBB);
22236 MF->insert(I, sinkMBB);
22237 MF->push_back(restoreMBB);
22238 restoreMBB->setHasAddressTaken();
22240 MachineInstrBuilder MIB;
22242 // Transfer the remainder of BB and its successor edges to sinkMBB.
22243 sinkMBB->splice(sinkMBB->begin(), MBB,
22244 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
22245 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
  // thisMBB:
  unsigned PtrStoreOpc = 0;
22249 unsigned LabelReg = 0;
22250 const int64_t LabelOffset = 1 * PVT.getStoreSize();
22251 Reloc::Model RM = MF->getTarget().getRelocationModel();
22252 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
22253 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
22255 // Prepare IP either in reg or imm.
22256 if (!UseImmLabel) {
22257 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
22258 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
22259 LabelReg = MRI.createVirtualRegister(PtrRC);
22260 if (Subtarget->is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
22269 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
22279 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
22280 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
22281 if (i == X86::AddrDisp)
22282 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
22290 MIB.setMemRefs(MMOBegin, MMOEnd);
22292 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
22293 .addMBB(restoreMBB);
22295 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
22296 MIB.addRegMask(RegInfo->getNoPreservedMask());
22297 thisMBB->addSuccessor(mainMBB);
22298 thisMBB->addSuccessor(restoreMBB);
  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
22303 mainMBB->addSuccessor(sinkMBB);
  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
22307 TII->get(X86::PHI), DstReg)
22308 .addReg(mainDstReg).addMBB(mainMBB)
22309 .addReg(restoreDstReg).addMBB(restoreMBB);
  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
22313 const bool Uses64BitFramePtr =
22314 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
22315 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
22316 X86FI->setRestoreBasePointer(MF);
22317 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
22318 unsigned BasePtr = RegInfo->getBaseRegister();
22319 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
22320 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
22321 FramePtr, true, X86FI->getRestoreBasePointerOffset())
22322 .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
22325 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
22326 restoreMBB->addSuccessor(sinkMBB);
  MI->eraseFromParent();
  return sinkMBB;
}
22332 MachineBasicBlock *
22333 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
22334 MachineBasicBlock *MBB) const {
22335 DebugLoc DL = MI->getDebugLoc();
22336 MachineFunction *MF = MBB->getParent();
22337 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
22338 MachineRegisterInfo &MRI = MF->getRegInfo();
22340 // Memory Reference
22341 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
22342 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
22344 MVT PVT = getPointerTy(MF->getDataLayout());
22345 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
22346 "Invalid Pointer Size!");
22348 const TargetRegisterClass *RC =
22349 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
22350 unsigned Tmp = MRI.createVirtualRegister(RC);
22351 // Since FP is only updated here but NOT referenced, it's treated as GPR.
22352 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
22353 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
22354 unsigned SP = RegInfo->getStackRegister();
22356 MachineInstrBuilder MIB;
22358 const int64_t LabelOffset = 1 * PVT.getStoreSize();
22359 const int64_t SPOffset = 2 * PVT.getStoreSize();
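  // The buffer written by the setjmp side above is laid out as: slot 0 =
  // frame pointer, slot 1 (LabelOffset) = resume address, slot 2 (SPOffset)
  // = stack pointer.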
22361 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
22362 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
  // Reload FP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
22366 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
22367 MIB.addOperand(MI->getOperand(i));
22368 MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload IP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
22371 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
22372 if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
22377 MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload SP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
22380 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
22381 if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), SPOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
22386 MIB.setMemRefs(MMOBegin, MMOEnd);
  // Jump
  BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI->eraseFromParent();
  return MBB;
}
22394 // Replace 213-type (isel default) FMA3 instructions with 231-type for
22395 // accumulator loops. Writing back to the accumulator allows the coalescer
22396 // to remove extra copies in the loop.
22397 // FIXME: Do this on AVX512. We don't support 231 variants yet (PR23937).
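// For reference, with operands (dst, src2, src3): the 213 form computes
// dst = src2 * dst + src3, while the 231 form computes dst = src2 * src3 + dst,
// i.e. the 231 form both reads and writes the accumulator operand in place.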
22398 MachineBasicBlock *
22399 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
22400 MachineBasicBlock *MBB) const {
22401 MachineOperand &AddendOp = MI->getOperand(3);
22403 // Bail out early if the addend isn't a register - we can't switch these.
  if (!AddendOp.isReg())
    return MBB;
22407 MachineFunction &MF = *MBB->getParent();
22408 MachineRegisterInfo &MRI = MF.getRegInfo();
22410 // Check whether the addend is defined by a PHI:
22411 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
22412 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;
  // Look for the following pattern:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // Then we can rewrite it into:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
22428 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
22429 assert(AddendDef.getOperand(i).isReg());
22430 MachineOperand PHISrcOp = AddendDef.getOperand(i);
22431 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
22432 if (&PHISrcInst == MI) {
22433 // Found a matching instruction.
22434 unsigned NewFMAOpc = 0;
22435 switch (MI->getOpcode()) {
22436 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
22437 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
22438 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
22439 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
22440 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
22441 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
22442 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
22443 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
22444 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
22445 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
22446 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
22447 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
22448 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
22449 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
22450 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
22451 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
22452 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
22453 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
22454 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
22455 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
22457 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
22458 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
22459 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
22460 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
22461 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
22462 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
22463 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
22464 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
22465 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
22466 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
22467 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
22468 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }
22472 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
22473 MachineInstrBuilder MIB =
22474 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
22475 .addOperand(MI->getOperand(0))
22476 .addOperand(MI->getOperand(3))
22477 .addOperand(MI->getOperand(2))
22478 .addOperand(MI->getOperand(1));
22479 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
    }
  }

  return MBB;
}
22487 MachineBasicBlock *
22488 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
22489 MachineBasicBlock *BB) const {
22490 switch (MI->getOpcode()) {
22491 default: llvm_unreachable("Unexpected instr type to insert");
22492 case X86::TAILJMPd64:
22493 case X86::TAILJMPr64:
22494 case X86::TAILJMPm64:
22495 case X86::TAILJMPd64_REX:
22496 case X86::TAILJMPr64_REX:
22497 case X86::TAILJMPm64_REX:
22498 llvm_unreachable("TAILJMP64 would not be touched here.");
22499 case X86::TCRETURNdi64:
22500 case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
22503 case X86::WIN_ALLOCA:
22504 return EmitLoweredWinAlloca(MI, BB);
22505 case X86::CATCHRET:
22506 return EmitLoweredCatchRet(MI, BB);
22507 case X86::CATCHPAD:
22508 return EmitLoweredCatchPad(MI, BB);
22509 case X86::SEG_ALLOCA_32:
22510 case X86::SEG_ALLOCA_64:
22511 return EmitLoweredSegAlloca(MI, BB);
22512 case X86::TLSCall_32:
22513 case X86::TLSCall_64:
22514 return EmitLoweredTLSCall(MI, BB);
22515 case X86::CMOV_FR32:
22516 case X86::CMOV_FR64:
22517 case X86::CMOV_FR128:
22518 case X86::CMOV_GR8:
22519 case X86::CMOV_GR16:
22520 case X86::CMOV_GR32:
22521 case X86::CMOV_RFP32:
22522 case X86::CMOV_RFP64:
22523 case X86::CMOV_RFP80:
22524 case X86::CMOV_V2F64:
22525 case X86::CMOV_V2I64:
22526 case X86::CMOV_V4F32:
22527 case X86::CMOV_V4F64:
22528 case X86::CMOV_V4I64:
22529 case X86::CMOV_V16F32:
22530 case X86::CMOV_V8F32:
22531 case X86::CMOV_V8F64:
22532 case X86::CMOV_V8I64:
22533 case X86::CMOV_V8I1:
22534 case X86::CMOV_V16I1:
22535 case X86::CMOV_V32I1:
22536 case X86::CMOV_V64I1:
22537 return EmitLoweredSelect(MI, BB);
22539 case X86::RELEASE_FADD32mr:
22540 case X86::RELEASE_FADD64mr:
22541 return EmitLoweredAtomicFP(MI, BB);
22543 case X86::FP32_TO_INT16_IN_MEM:
22544 case X86::FP32_TO_INT32_IN_MEM:
22545 case X86::FP32_TO_INT64_IN_MEM:
22546 case X86::FP64_TO_INT16_IN_MEM:
22547 case X86::FP64_TO_INT32_IN_MEM:
22548 case X86::FP64_TO_INT64_IN_MEM:
22549 case X86::FP80_TO_INT16_IN_MEM:
22550 case X86::FP80_TO_INT32_IN_MEM:
22551 case X86::FP80_TO_INT64_IN_MEM: {
22552 MachineFunction *F = BB->getParent();
22553 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
22554 DebugLoc DL = MI->getDebugLoc();
22556 // Change the floating point control register to use "round towards zero"
22557 // mode when truncating to an integer value.
22558 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
22559 addFrameReference(BuildMI(*BB, MI, DL,
22560 TII->get(X86::FNSTCW16m)), CWFrameIdx);
22562 // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);
22568 // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
22572 // Reload the modified control word now...
22573 addFrameReference(BuildMI(*BB, MI, DL,
22574 TII->get(X86::FLDCW16m)), CWFrameIdx);
22576 // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);
22580 // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
22583 default: llvm_unreachable("illegal opcode!");
22584 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
22585 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
22586 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
22587 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
22588 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
22589 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
22590 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
22591 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
22607 Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
22610 Op = MI->getOperand(3);
22611 if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
22616 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
22617 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
22619 // Reload the original control word now.
22620 addFrameReference(BuildMI(*BB, MI, DL,
22621 TII->get(X86::FLDCW16m)), CWFrameIdx);
    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
22626 // String/text processing lowering.
22627 case X86::PCMPISTRM128REG:
22628 case X86::VPCMPISTRM128REG:
22629 case X86::PCMPISTRM128MEM:
22630 case X86::VPCMPISTRM128MEM:
22631 case X86::PCMPESTRM128REG:
22632 case X86::VPCMPESTRM128REG:
22633 case X86::PCMPESTRM128MEM:
22634 case X86::VPCMPESTRM128MEM:
22635 assert(Subtarget->hasSSE42() &&
22636 "Target must have SSE4.2 or AVX features enabled");
22637 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
22639 // String/text processing lowering.
22640 case X86::PCMPISTRIREG:
22641 case X86::VPCMPISTRIREG:
22642 case X86::PCMPISTRIMEM:
22643 case X86::VPCMPISTRIMEM:
22644 case X86::PCMPESTRIREG:
22645 case X86::VPCMPESTRIREG:
22646 case X86::PCMPESTRIMEM:
22647 case X86::VPCMPESTRIMEM:
22648 assert(Subtarget->hasSSE42() &&
22649 "Target must have SSE4.2 or AVX features enabled");
22650 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
22652 // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, Subtarget);
  // PKU feature
  case X86::WRPKRU:
    return EmitWRPKRU(MI, BB, Subtarget);
  case X86::RDPKRU:
    return EmitRDPKRU(MI, BB, Subtarget);
  // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
22664 case X86::VASTART_SAVE_XMM_REGS:
22665 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
22667 case X86::VAARG_64:
22668 return EmitVAARG64WithCustomInserter(MI, BB);
22670 case X86::EH_SjLj_SetJmp32:
22671 case X86::EH_SjLj_SetJmp64:
22672 return emitEHSjLjSetJmp(MI, BB);
22674 case X86::EH_SjLj_LongJmp32:
22675 case X86::EH_SjLj_LongJmp64:
22676 return emitEHSjLjLongJmp(MI, BB);
22678 case TargetOpcode::STATEPOINT:
22679 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22680 // this point in the process. We diverge later.
22681 return emitPatchPoint(MI, BB);
22683 case TargetOpcode::STACKMAP:
22684 case TargetOpcode::PATCHPOINT:
22685 return emitPatchPoint(MI, BB);
22687 case X86::VFMADDPDr213r:
22688 case X86::VFMADDPSr213r:
22689 case X86::VFMADDSDr213r:
22690 case X86::VFMADDSSr213r:
22691 case X86::VFMSUBPDr213r:
22692 case X86::VFMSUBPSr213r:
22693 case X86::VFMSUBSDr213r:
22694 case X86::VFMSUBSSr213r:
22695 case X86::VFNMADDPDr213r:
22696 case X86::VFNMADDPSr213r:
22697 case X86::VFNMADDSDr213r:
22698 case X86::VFNMADDSSr213r:
22699 case X86::VFNMSUBPDr213r:
22700 case X86::VFNMSUBPSr213r:
22701 case X86::VFNMSUBSDr213r:
22702 case X86::VFNMSUBSSr213r:
22703 case X86::VFMADDSUBPDr213r:
22704 case X86::VFMADDSUBPSr213r:
22705 case X86::VFMSUBADDPDr213r:
22706 case X86::VFMSUBADDPSr213r:
22707 case X86::VFMADDPDr213rY:
22708 case X86::VFMADDPSr213rY:
22709 case X86::VFMSUBPDr213rY:
22710 case X86::VFMSUBPSr213rY:
22711 case X86::VFNMADDPDr213rY:
22712 case X86::VFNMADDPSr213rY:
22713 case X86::VFNMSUBPDr213rY:
22714 case X86::VFNMSUBPSr213rY:
22715 case X86::VFMADDSUBPDr213rY:
22716 case X86::VFMADDSUBPSr213rY:
22717 case X86::VFMSUBADDPDr213rY:
22718 case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}
22723 //===----------------------------------------------------------------------===//
22724 // X86 Optimization Hooks
22725 //===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
22731 unsigned Depth) const {
22732 unsigned BitWidth = KnownZero.getBitWidth();
22733 unsigned Opc = Op.getOpcode();
22734 assert((Opc >= ISD::BUILTIN_OP_END ||
22735 Opc == ISD::INTRINSIC_WO_CHAIN ||
22736 Opc == ISD::INTRINSIC_W_CHAIN ||
22737 Opc == ISD::INTRINSIC_VOID) &&
22738 "Should use MaskedValueIsZero if you don't know whether Op"
22739 " is a target node!");
  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
22762 case ISD::INTRINSIC_WO_CHAIN: {
22763 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
22767 case Intrinsic::x86_sse_movmsk_ps:
22768 case Intrinsic::x86_avx_movmsk_ps_256:
22769 case Intrinsic::x86_sse2_movmsk_pd:
22770 case Intrinsic::x86_avx_movmsk_pd_256:
22771 case Intrinsic::x86_mmx_pmovmskb:
22772 case Intrinsic::x86_sse2_pmovmskb_128:
22773 case Intrinsic::x86_avx2_pmovmskb: {
22774 // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
      default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22777 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22778 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22779 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22780 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22781 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22782 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22783 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22785 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
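// Worked example: x86_sse_movmsk_ps has NumLoBits == 4, so for an i32
// result the top 28 bits are known zero, letting later combines delete
// redundant masks such as (and (movmskps ...), 0xF).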
22794 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op,
const SelectionDAG &,
22797 unsigned Depth) const {
22798 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22799 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
return Op.getValueType().getScalarSizeInBits();
return 1;
}
22806 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22807 /// node is a GlobalAddress + offset.
22808 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22809 const GlobalValue* &GA,
22810 int64_t &Offset) const {
22811 if (N->getOpcode() == X86ISD::Wrapper) {
22812 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22813 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
return true;
}
}
22818 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22821 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22822 /// FIXME: This could be expanded to support 512 bit vectors as well.
22823 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22824 TargetLowering::DAGCombinerInfo &DCI,
22825 const X86Subtarget* Subtarget) {
SDLoc dl(N);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22828 SDValue V1 = SVOp->getOperand(0);
22829 SDValue V2 = SVOp->getOperand(1);
22830 MVT VT = SVOp->getSimpleValueType(0);
22831 unsigned NumElems = VT.getVectorNumElements();
22833 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22834 V2.getOpcode() == ISD::CONCAT_VECTORS) {
//    V      UNDEF    BUILD_VECTOR(0)    UNDEF
//     \      /             \             /
//  CONCAT_VECTORS         CONCAT_VECTORS
//          \                   /
//           RESULT: V + zero extended
22845 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22846 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
V1.getOperand(1).getOpcode() != ISD::UNDEF)
  return SDValue();

if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
  return SDValue();
22853 // To match the shuffle mask, the first half of the mask should
22854 // be exactly the first vector, and all the rest a splat with the
22855 // first element of the second one.
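// E.g. for v8i32 the only accepted mask is <0,1,2,3,8,8,8,8> (with undefs
// allowed in any slot): the low half is V1's first operand and the high
// half splats element NumElems, the zero vector's first element.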
22856 for (unsigned i = 0; i != NumElems/2; ++i)
22857 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
!isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
  return SDValue();
22861 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22862 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22863 if (Ld->hasNUsesOfValue(1, 0)) {
22864 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22865 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
SDValue ResNode =
    DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                            MVT::i64,
22869 Ld->getPointerInfo(),
22870 Ld->getAlignment(),
22871 false/*isVolatile*/, true/*ReadMem*/,
22872 false/*WriteMem*/);
22874 // Make sure the newly-created LOAD is in the same position as Ld in
22875 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22876 // and update uses of Ld's output chain to use the TokenFactor.
22877 if (Ld->hasAnyUseOfValue(1)) {
22878 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22879 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22880 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22881 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22882 SDValue(ResNode.getNode(), 1));
22885 return DAG.getBitcast(VT, ResNode);
// Emit a zeroed vector and insert the desired subvector on its
// first half.
22891 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22892 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22893 return DCI.CombineTo(N, InsV);
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
22903 /// chain of single-use x86 shuffle instructions and accumulated the combined
22904 /// shuffle mask represented by them, this will try to pattern match that mask
22905 /// into either a single instruction if there is a special purpose instruction
22906 /// for this operation, or into a PSHUFB instruction which is a fully general
22907 /// instruction but should only be used to replace chains over a certain depth.
22908 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22909 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22910 TargetLowering::DAGCombinerInfo &DCI,
22911 const X86Subtarget *Subtarget) {
22912 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22914 // Find the operand that enters the chain. Note that multiple uses are OK
22915 // here, we're not going to remove the operand we find.
22916 SDValue Input = Op.getOperand(0);
22917 while (Input.getOpcode() == ISD::BITCAST)
22918 Input = Input.getOperand(0);
22920 MVT VT = Input.getSimpleValueType();
MVT RootVT = Root.getSimpleValueType();
SDLoc DL(Root);
22924 if (Mask.size() == 1) {
22925 int Index = Mask[0];
22926 assert((Index >= 0 || Index == SM_SentinelUndef ||
22927 Index == SM_SentinelZero) &&
22928 "Invalid shuffle index found!");
22930 // We may end up with an accumulated mask of size 1 as a result of
22931 // widening of shuffle operands (see function canWidenShuffleElements).
22932 // If the only shuffle index is equal to SM_SentinelZero then propagate
22933 // a zero vector. Otherwise, the combine shuffle mask is a no-op shuffle
22934 // mask, and therefore the entire chain of shuffles can be folded away.
if (Index == SM_SentinelZero)
  DCI.CombineTo(Root.getNode(), getZeroVector(RootVT, Subtarget, DAG, DL));
else
  DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
                /*AddTo*/ true);
return true;
}
22943 // Use the float domain if the operand type is a floating point type.
22944 bool FloatDomain = VT.isFloatingPoint();
22946 // For floating point shuffles, we don't have free copies in the shuffle
22947 // instructions or the ability to load as part of the instruction, so
22948 // canonicalize their shuffles to UNPCK or MOV variants.
22950 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22951 // vectors because it can have a load folded into it that UNPCK cannot. This
22952 // doesn't preclude something switching to the shorter encoding post-RA.
22954 // FIXME: Should teach these routines about AVX vector widths.
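// E.g. in the code below, the v2f64 mask {0,0} becomes MOVDDUP on SSE3
// (or MOVLHPS otherwise) and {1,1} becomes MOVHLPS, both of which encode
// shorter than the equivalent UNPCKLPD/UNPCKHPD forms.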
22955 if (FloatDomain && VT.is128BitVector()) {
22956 if (Mask.equals({0, 0}) || Mask.equals({1, 1})) {
bool Lo = Mask.equals({0, 0});
unsigned Shuffle;
MVT ShuffleVT;
22960 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22961 // is no slower than UNPCKLPD but has the option to fold the input operand
22962 // into even an unaligned memory load.
22963 if (Lo && Subtarget->hasSSE3()) {
22964 Shuffle = X86ISD::MOVDDUP;
ShuffleVT = MVT::v2f64;
} else {
  // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
  // than the UNPCK variants.
  Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
  ShuffleVT = MVT::v4f32;
}
22972 if (Depth == 1 && Root->getOpcode() == Shuffle)
22973 return false; // Nothing to do!
22974 Op = DAG.getBitcast(ShuffleVT, Input);
22975 DCI.AddToWorklist(Op.getNode());
22976 if (Shuffle == X86ISD::MOVDDUP)
Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
else
  Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22980 DCI.AddToWorklist(Op.getNode());
22981 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
22985 if (Subtarget->hasSSE3() &&
22986 (Mask.equals({0, 0, 2, 2}) || Mask.equals({1, 1, 3, 3}))) {
22987 bool Lo = Mask.equals({0, 0, 2, 2});
22988 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22989 MVT ShuffleVT = MVT::v4f32;
22990 if (Depth == 1 && Root->getOpcode() == Shuffle)
22991 return false; // Nothing to do!
22992 Op = DAG.getBitcast(ShuffleVT, Input);
22993 DCI.AddToWorklist(Op.getNode());
22994 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22995 DCI.AddToWorklist(Op.getNode());
22996 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
23000 if (Mask.equals({0, 0, 1, 1}) || Mask.equals({2, 2, 3, 3})) {
23001 bool Lo = Mask.equals({0, 0, 1, 1});
23002 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
23003 MVT ShuffleVT = MVT::v4f32;
23004 if (Depth == 1 && Root->getOpcode() == Shuffle)
23005 return false; // Nothing to do!
23006 Op = DAG.getBitcast(ShuffleVT, Input);
23007 DCI.AddToWorklist(Op.getNode());
23008 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
23009 DCI.AddToWorklist(Op.getNode());
23010 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
23016 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
23017 // variants as none of these have single-instruction variants that are
23018 // superior to the UNPCK formulation.
23019 if (!FloatDomain && VT.is128BitVector() &&
23020 (Mask.equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
23021 Mask.equals({4, 4, 5, 5, 6, 6, 7, 7}) ||
23022 Mask.equals({0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}) ||
23024 {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}))) {
23025 bool Lo = Mask[0] == 0;
23026 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
23027 if (Depth == 1 && Root->getOpcode() == Shuffle)
23028 return false; // Nothing to do!
MVT ShuffleVT;
switch (Mask.size()) {
case 8:
  ShuffleVT = MVT::v8i16;
  break;
case 16:
  ShuffleVT = MVT::v16i8;
  break;
default:
  llvm_unreachable("Impossible mask size!");
}
23040 Op = DAG.getBitcast(ShuffleVT, Input);
23041 DCI.AddToWorklist(Op.getNode());
23042 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
23043 DCI.AddToWorklist(Op.getNode());
23044 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
23049 // Don't try to re-form single instruction chains under any circumstances now
23050 // that we've done encoding canonicalization for them.
23054 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
23055 // can replace them with a single PSHUFB instruction profitably. Intel's
// manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
23057 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
23058 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
23059 SmallVector<SDValue, 16> PSHUFBMask;
23060 int NumBytes = VT.getSizeInBits() / 8;
23061 int Ratio = NumBytes / Mask.size();
23062 for (int i = 0; i < NumBytes; ++i) {
23063 if (Mask[i / Ratio] == SM_SentinelUndef) {
PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}

int M = Mask[i / Ratio] != SM_SentinelZero
            ? Ratio * Mask[i / Ratio] + i % Ratio
            : 255;
PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
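// Worked example (with the 255 zero-sentinel above): a v4i32 mask
// <1,0,3,2> over 16 bytes has Ratio == 4 and expands to the byte mask
// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11>; an SM_SentinelZero lane
// would instead emit 255, which PSHUFB treats as "write a zero byte".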
23072 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
23073 Op = DAG.getBitcast(ByteVT, Input);
23074 DCI.AddToWorklist(Op.getNode());
23075 SDValue PSHUFBMaskOp =
23076 DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVT, PSHUFBMask);
23077 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
23078 Op = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Op, PSHUFBMaskOp);
23079 DCI.AddToWorklist(Op.getNode());
23080 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
// Failed to find any combines.
return false;
}
23089 /// \brief Fully generic combining of x86 shuffle instructions.
23091 /// This should be the last combine run over the x86 shuffle instructions. Once
23092 /// they have been fully optimized, this will recursively consider all chains
23093 /// of single-use shuffle instructions, build a generic model of the cumulative
23094 /// shuffle operation, and check for simpler instructions which implement this
23095 /// operation. We use this primarily for two purposes:
23097 /// 1) Collapse generic shuffles to specialized single instructions when
23098 /// equivalent. In most cases, this is just an encoding size win, but
23099 /// sometimes we will collapse multiple generic shuffles into a single
23100 /// special-purpose shuffle.
23101 /// 2) Look for sequences of shuffle instructions with 3 or more total
23102 /// instructions, and replace them with the slightly more expensive SSSE3
23103 /// PSHUFB instruction if available. We do this as the last combining step
23104 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
23106 /// use a register or have to read from memory and so is slightly (but only
23107 /// slightly) more expensive than the other shuffle instructions.
23109 /// Because this is inherently a quadratic operation (for each shuffle in
23110 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
23111 /// This should never be an issue in practice as the shuffle lowering doesn't
23112 /// produce sequences of more than 8 instructions.
23114 /// FIXME: We will currently miss some cases where the redundant shuffling
23115 /// would simplify under the threshold for PSHUFB formation because of
23116 /// combine-ordering. To fix this, we should do the redundant instruction
23117 /// combining in this recursive walk.
23118 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
23119 ArrayRef<int> RootMask,
23120 int Depth, bool HasPSHUFB,
23122 TargetLowering::DAGCombinerInfo &DCI,
23123 const X86Subtarget *Subtarget) {
23124 // Bound the depth of our recursive combine because this is ultimately
// quadratic in nature.
if (Depth > 8)
  return false;
23129 // Directly rip through bitcasts to find the underlying operand.
23130 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
23131 Op = Op.getOperand(0);
23133 MVT VT = Op.getSimpleValueType();
23134 if (!VT.isVector())
23135 return false; // Bail if we hit a non-vector.
23137 assert(Root.getSimpleValueType().isVector() &&
23138 "Shuffles operate on vector types!");
23139 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
23140 "Can only combine shuffles of the same vector register size.");
if (!isTargetShuffle(Op.getOpcode()))
  return false;
SmallVector<int, 16> OpMask;
bool IsUnary;
bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
// We can only combine unary shuffles which we can decode the mask for.
if (!HaveMask || !IsUnary)
  return false;
23151 assert(VT.getVectorNumElements() == OpMask.size() &&
23152 "Different mask size from vector size!");
23153 assert(((RootMask.size() > OpMask.size() &&
23154 RootMask.size() % OpMask.size() == 0) ||
23155 (OpMask.size() > RootMask.size() &&
23156 OpMask.size() % RootMask.size() == 0) ||
23157 OpMask.size() == RootMask.size()) &&
23158 "The smaller number of elements must divide the larger.");
23159 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
23160 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
23161 assert(((RootRatio == 1 && OpRatio == 1) ||
23162 (RootRatio == 1) != (OpRatio == 1)) &&
23163 "Must not have a ratio for both incoming and op masks!");
23165 SmallVector<int, 16> Mask;
23166 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
23168 // Merge this shuffle operation's mask into our accumulated mask. Note that
23169 // this shuffle's mask will be the first applied to the input, followed by the
23170 // root mask to get us all the way to the root value arrangement. The reason
23171 // for this order is that we are recursing up the operation chain.
23172 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
23173 int RootIdx = i / RootRatio;
23174 if (RootMask[RootIdx] < 0) {
23175 // This is a zero or undef lane, we're done.
Mask.push_back(RootMask[RootIdx]);
continue;
}
23180 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
23181 int OpIdx = RootMaskedIdx / OpRatio;
23182 if (OpMask[OpIdx] < 0) {
23183 // The incoming lanes are zero or undef, it doesn't matter which ones we
Mask.push_back(OpMask[OpIdx]);
continue;
}
23189 // Ok, we have non-zero lanes, map them through.
23190 Mask.push_back(OpMask[OpIdx] * OpRatio +
RootMaskedIdx % OpRatio);
}
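// Worked example of the composition: OpMask <2,3,0,1> (v4i32) under
// RootMask <1,0> (v2i64) gives RootRatio == 2, OpRatio == 1 and composes
// to <0,1,2,3>: the dword swap and the qword swap cancel out, so the
// whole chain later folds to a no-op.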
23194 // See if we can recurse into the operand to combine more things.
23195 switch (Op.getOpcode()) {
23196 case X86ISD::PSHUFB:
23198 case X86ISD::PSHUFD:
23199 case X86ISD::PSHUFHW:
23200 case X86ISD::PSHUFLW:
23201 if (Op.getOperand(0).hasOneUse() &&
23202 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
23203 HasPSHUFB, DAG, DCI, Subtarget))
23207 case X86ISD::UNPCKL:
23208 case X86ISD::UNPCKH:
23209 assert(Op.getOperand(0) == Op.getOperand(1) &&
23210 "We only combine unary shuffles!");
// We can't check for single use, we have to check that this shuffle is the
// only user.
23213 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
23214 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
23215 HasPSHUFB, DAG, DCI, Subtarget))
23220 // Minor canonicalization of the accumulated shuffle mask to make it easier
// to match below. All this does is detect masks with sequential pairs of
23222 // elements, and shrink them to the half-width mask. It does this in a loop
23223 // so it will reduce the size of the mask to the minimal width mask which
23224 // performs an equivalent shuffle.
23225 SmallVector<int, 16> WidenedMask;
23226 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
23227 Mask = std::move(WidenedMask);
23228 WidenedMask.clear();
return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
                              Subtarget);
}
23235 /// \brief Get the PSHUF-style mask from PSHUF node.
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
23238 /// PSHUF-style masks that can be reused with such instructions.
23239 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
23240 MVT VT = N.getSimpleValueType();
23241 SmallVector<int, 4> Mask;
bool IsUnary;
bool HaveMask = getTargetShuffleMask(N.getNode(), VT, Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
23247 // If we have more than 128-bits, only the low 128-bits of shuffle mask
23248 // matter. Check that the upper masks are repeats and remove them.
23249 if (VT.getSizeInBits() > 128) {
23250 int LaneElts = 128 / VT.getScalarSizeInBits();
23252 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
23253 for (int j = 0; j < LaneElts; ++j)
23254 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
23255 "Mask doesn't repeat in high 128-bit lanes!");
23257 Mask.resize(LaneElts);
23260 switch (N.getOpcode()) {
case X86ISD::PSHUFD:
  return Mask;
case X86ISD::PSHUFLW:
  Mask.resize(4);
  return Mask;
case X86ISD::PSHUFHW:
23267 Mask.erase(Mask.begin(), Mask.begin() + 4);
for (int &M : Mask)
  M -= 4;
return Mask;
default:
  llvm_unreachable("No valid shuffle instruction found!");
}
}
23276 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
23278 /// We walk up the chain and look for a combinable shuffle, skipping over
23279 /// shuffles that we could hoist this shuffle's transformation past without
23280 /// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
23284 TargetLowering::DAGCombinerInfo &DCI) {
23285 assert(N.getOpcode() == X86ISD::PSHUFD &&
23286 "Called with something other than an x86 128-bit half shuffle!");
23289 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
23290 // of the shuffles in the chain so that we can form a fresh chain to replace
23292 SmallVector<SDValue, 8> Chain;
23293 SDValue V = N.getOperand(0);
23294 for (; V.hasOneUse(); V = V.getOperand(0)) {
23295 switch (V.getOpcode()) {
23297 return SDValue(); // Nothing combined!
23300 // Skip bitcasts as we always know the type for the target specific
23304 case X86ISD::PSHUFD:
23305 // Found another dword shuffle.
23308 case X86ISD::PSHUFLW:
23309 // Check that the low words (being shuffled) are the identity in the
23310 // dword shuffle, and the high words are self-contained.
23311 if (Mask[0] != 0 || Mask[1] != 1 ||
23312 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
23315 Chain.push_back(V);
23318 case X86ISD::PSHUFHW:
23319 // Check that the high words (being shuffled) are the identity in the
23320 // dword shuffle, and the low words are self-contained.
23321 if (Mask[2] != 2 || Mask[3] != 3 ||
23322 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
23325 Chain.push_back(V);
23328 case X86ISD::UNPCKL:
23329 case X86ISD::UNPCKH:
23330 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
23331 // shuffle into a preceding word shuffle.
23332 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
23333 V.getSimpleValueType().getVectorElementType() != MVT::i16)
23336 // Search for a half-shuffle which we can combine with.
23337 unsigned CombineOp =
23338 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
23339 if (V.getOperand(0) != V.getOperand(1) ||
23340 !V->isOnlyUserOf(V.getOperand(0).getNode()))
23342 Chain.push_back(V);
23343 V = V.getOperand(0);
23345 switch (V.getOpcode()) {
23347 return SDValue(); // Nothing to combine.
23349 case X86ISD::PSHUFLW:
23350 case X86ISD::PSHUFHW:
23351 if (V.getOpcode() == CombineOp)
23354 Chain.push_back(V);
23358 V = V.getOperand(0);
23362 } while (V.hasOneUse());
23365 // Break out of the loop if we break out of the switch.
23369 if (!V.hasOneUse())
// We fell out of the loop without finding a viable combining instruction.
return SDValue();
23373 // Merge this node's mask and our incoming mask.
23374 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
for (int &M : Mask)
  M = VMask[M];
23377 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
23378 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
23380 // Rebuild the chain around this new shuffle.
23381 while (!Chain.empty()) {
23382 SDValue W = Chain.pop_back_val();
23384 if (V.getValueType() != W.getOperand(0).getValueType())
23385 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
23387 switch (W.getOpcode()) {
23389 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
23391 case X86ISD::UNPCKL:
23392 case X86ISD::UNPCKH:
23393 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
23396 case X86ISD::PSHUFD:
23397 case X86ISD::PSHUFLW:
23398 case X86ISD::PSHUFHW:
23399 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
23403 if (V.getValueType() != N.getValueType())
23404 V = DAG.getBitcast(N.getValueType(), V);
// Return the new chain to replace N.
return V;
}
/// \brief Search for a combinable shuffle across a chain ending in pshuflw or
/// pshufhw.
23413 /// We walk up the chain, skipping shuffles of the other half and looking
23414 /// through shuffles which switch halves trying to find a shuffle of the same
23415 /// pair of dwords.
23416 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
23418 TargetLowering::DAGCombinerInfo &DCI) {
assert(
    (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
23421 "Called with something other than an x86 128-bit half shuffle!");
23423 unsigned CombineOpcode = N.getOpcode();
23425 // Walk up a single-use chain looking for a combinable shuffle.
23426 SDValue V = N.getOperand(0);
23427 for (; V.hasOneUse(); V = V.getOperand(0)) {
23428 switch (V.getOpcode()) {
23430 return false; // Nothing combined!
23433 // Skip bitcasts as we always know the type for the target specific
23437 case X86ISD::PSHUFLW:
23438 case X86ISD::PSHUFHW:
23439 if (V.getOpcode() == CombineOpcode)
23442 // Other-half shuffles are no-ops.
23445 // Break out of the loop if we break out of the switch.
23449 if (!V.hasOneUse())
// We fell out of the loop without finding a viable combining instruction.
return false;
23453 // Combine away the bottom node as its shuffle will be accumulated into
23454 // a preceding shuffle.
23455 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
// Record the old value.
SDValue Old = V;
23460 // Merge this node's mask and our incoming mask (adjusted to account for all
23461 // the pshufd instructions encountered).
23462 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
for (int &M : Mask)
  M = VMask[M];
23465 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
23466 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Check that the shuffles didn't cancel each other out. If not, we need to
// combine to the new one.
if (Old != V)
  // Replace the combinable shuffle with the combined one, updating all users
  // so that we re-evaluate the chain here.
  DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);

return true;
}
23478 /// \brief Try to combine x86 target specific shuffles.
23479 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
23480 TargetLowering::DAGCombinerInfo &DCI,
23481 const X86Subtarget *Subtarget) {
23483 MVT VT = N.getSimpleValueType();
23484 SmallVector<int, 4> Mask;
23486 switch (N.getOpcode()) {
23487 case X86ISD::PSHUFD:
23488 case X86ISD::PSHUFLW:
23489 case X86ISD::PSHUFHW:
23490 Mask = getPSHUFShuffleMask(N);
23491 assert(Mask.size() == 4);
23493 case X86ISD::UNPCKL: {
23494 // Combine X86ISD::UNPCKL and ISD::VECTOR_SHUFFLE into X86ISD::UNPCKH, in
23495 // which X86ISD::UNPCKL has a ISD::UNDEF operand, and ISD::VECTOR_SHUFFLE
23496 // moves upper half elements into the lower half part. For example:
23498 // t2: v16i8 = vector_shuffle<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u> t1,
23500 // t3: v16i8 = X86ISD::UNPCKL undef:v16i8, t2
23502 // will be combined to:
23504 // t3: v16i8 = X86ISD::UNPCKH undef:v16i8, t1
// This is only for 128-bit vectors. From SSE4.1 onward this combine may
// not arise, since more capable shuffle instructions are selected there.
23508 if (!VT.is128BitVector())
23511 auto Op0 = N.getOperand(0);
23512 auto Op1 = N.getOperand(1);
23513 if (Op0.getOpcode() == ISD::UNDEF &&
23514 Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
23515 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
23517 unsigned NumElts = VT.getVectorNumElements();
23518 SmallVector<int, 8> ExpectedMask(NumElts, -1);
std::iota(ExpectedMask.begin(), ExpectedMask.begin() + NumElts / 2,
          NumElts / 2);
23522 auto ShufOp = Op1.getOperand(0);
23523 if (isShuffleEquivalent(Op1, ShufOp, Mask, ExpectedMask))
23524 return DAG.getNode(X86ISD::UNPCKH, DL, VT, N.getOperand(0), ShufOp);
23532 // Nuke no-op shuffles that show up after combining.
23533 if (isNoopShuffleMask(Mask))
23534 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
23536 // Look for simplifications involving one or two shuffle instructions.
23537 SDValue V = N.getOperand(0);
23538 switch (N.getOpcode()) {
23541 case X86ISD::PSHUFLW:
23542 case X86ISD::PSHUFHW:
23543 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
23545 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
23546 return SDValue(); // We combined away this shuffle, so we're done.
23548 // See if this reduces to a PSHUFD which is no more expensive and can
23549 // combine with more operations. Note that it has to at least flip the
23550 // dwords as otherwise it would have been removed as a no-op.
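// E.g. (PSHUFLW <2,3,0,1> x) swaps the two low words pairwise, which is
// the same as swapping the two low dwords, so it can be rewritten as
// (bitcast (PSHUFD <1,0,2,3> (bitcast x))) by the code below.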
23551 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
23552 int DMask[] = {0, 1, 2, 3};
23553 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
23554 DMask[DOffset + 0] = DOffset + 1;
23555 DMask[DOffset + 1] = DOffset + 0;
23556 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
23557 V = DAG.getBitcast(DVT, V);
23558 DCI.AddToWorklist(V.getNode());
23559 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
23560 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
23561 DCI.AddToWorklist(V.getNode());
return DAG.getBitcast(VT, V);
}
23565 // Look for shuffle patterns which can be implemented as a single unpack.
23566 // FIXME: This doesn't handle the location of the PSHUFD generically, and
23567 // only works when we have a PSHUFD followed by two half-shuffles.
23568 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
23569 (V.getOpcode() == X86ISD::PSHUFLW ||
23570 V.getOpcode() == X86ISD::PSHUFHW) &&
23571 V.getOpcode() != N.getOpcode() &&
23573 SDValue D = V.getOperand(0);
23574 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
23575 D = D.getOperand(0);
23576 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
23577 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
23578 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
23579 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
23580 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int WordMask[8];
for (int i = 0; i < 4; ++i) {
  WordMask[i + NOffset] = Mask[i] + NOffset;
  WordMask[i + VOffset] = VMask[i] + VOffset;
}
23586 // Map the word mask through the DWord mask.
int MappedMask[8];
for (int i = 0; i < 8; ++i)
23589 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
23590 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
23591 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
23592 // We can replace all three shuffles with an unpack.
23593 V = DAG.getBitcast(VT, D.getOperand(0));
23594 DCI.AddToWorklist(V.getNode());
23595 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
23604 case X86ISD::PSHUFD:
if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
  return NewN;
23614 /// \brief Try to combine a shuffle into a target-specific add-sub node.
23616 /// We combine this directly on the abstract vector shuffle nodes so it is
23617 /// easier to generically match. We also insert dummy vector shuffle nodes for
23618 /// the operands which explicitly discard the lanes which are unused by this
23619 /// operation to try to flow through the rest of the combiner the fact that
23620 /// they're unused.
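///
/// For example, (vector_shuffle <0,5,2,7> (fsub A, B), (fadd A, B)) on
/// v4f32 takes the difference in the even lanes and the sum in the odd
/// lanes, which is exactly (X86ISD::ADDSUB A, B).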
23621 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
23623 EVT VT = N->getValueType(0);
23625 // We only handle target-independent shuffles.
23626 // FIXME: It would be easy and harmless to use the target shuffle mask
23627 // extraction tool to support more.
23628 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
23631 auto *SVN = cast<ShuffleVectorSDNode>(N);
23632 SmallVector<int, 8> Mask;
23633 for (int M : SVN->getMask())
23636 SDValue V1 = N->getOperand(0);
23637 SDValue V2 = N->getOperand(1);
23639 // We require the first shuffle operand to be the FSUB node, and the second to
23640 // be the FADD node.
23641 if (V1.getOpcode() == ISD::FADD && V2.getOpcode() == ISD::FSUB) {
23642 ShuffleVectorSDNode::commuteMask(Mask);
23644 } else if (V1.getOpcode() != ISD::FSUB || V2.getOpcode() != ISD::FADD)
23647 // If there are other uses of these operations we can't fold them.
23648 if (!V1->hasOneUse() || !V2->hasOneUse())
23651 // Ensure that both operations have the same operands. Note that we can
23652 // commute the FADD operands.
23653 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
23654 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
23655 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
23658 // We're looking for blends between FADD and FSUB nodes. We insist on these
23659 // nodes being lined up in a specific expected pattern.
23660 if (!(isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
23661 isShuffleEquivalent(V1, V2, Mask, {0, 5, 2, 7}) ||
23662 isShuffleEquivalent(V1, V2, Mask, {0, 9, 2, 11, 4, 13, 6, 15})))
23665 // Only specific types are legal at this point, assert so we notice if and
23666 // when these change.
23667 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
23668 VT == MVT::v4f64) &&
23669 "Unknown vector type encountered!");
23671 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
23674 /// PerformShuffleCombine - Performs several different shuffle combines.
23675 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
23676 TargetLowering::DAGCombinerInfo &DCI,
23677 const X86Subtarget *Subtarget) {
23679 SDValue N0 = N->getOperand(0);
23680 SDValue N1 = N->getOperand(1);
23681 EVT VT = N->getValueType(0);
23683 // Don't create instructions with illegal types after legalize types has run.
23684 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23685 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
23688 // If we have legalized the vector types, look for blends of FADD and FSUB
23689 // nodes that we can fuse into an ADDSUB node.
23690 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
23691 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
23694 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23695 if (TLI.isTypeLegal(VT) && Subtarget->hasFp256() && VT.is256BitVector() &&
23696 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23697 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23699 // During Type Legalization, when promoting illegal vector types,
23700 // the backend might introduce new shuffle dag nodes and bitcasts.
23702 // This code performs the following transformation:
23703 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23704 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23706 // We do this only if both the bitcast and the BINOP dag nodes have
23707 // one use. Also, perform this transformation only if the new binary
23708 // operation is legal. This is to avoid introducing dag nodes that
23709 // potentially need to be further expanded (or custom lowered) into a
23710 // less optimal sequence of dag nodes.
23711 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23712 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23713 N0.getOpcode() == ISD::BITCAST) {
23714 SDValue BC0 = N0.getOperand(0);
23715 EVT SVT = BC0.getValueType();
23716 unsigned Opcode = BC0.getOpcode();
23717 unsigned NumElts = VT.getVectorNumElements();
23719 if (BC0.hasOneUse() && SVT.isVector() &&
23720 SVT.getVectorNumElements() * 2 == NumElts &&
23721 TLI.isOperationLegal(Opcode, VT)) {
23722 bool CanFold = false;
23734 unsigned SVTNumElts = SVT.getVectorNumElements();
23735 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23736 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23737 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23738 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23739 CanFold = SVOp->getMaskElt(i) < 0;
23742 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
23743 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
23744 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23745 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23750 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23751 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23752 // consecutive, non-overlapping, and in the right order.
23753 SmallVector<SDValue, 16> Elts;
23754 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23755 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23757 if (SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true))
23760 if (isTargetShuffle(N->getOpcode())) {
SDValue Shuffle =
    PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
if (Shuffle.getNode())
  return Shuffle;
23766 // Try recursively combining arbitrary sequences of x86 shuffle
23767 // instructions into higher-order shuffles. We do this after combining
23768 // specific PSHUF instruction sequences into their minimal form so that we
23769 // can evaluate how many specialized shuffle instructions are involved in
23770 // a particular chain.
23771 SmallVector<int, 1> NonceMask; // Just a placeholder.
23772 NonceMask.push_back(0);
23773 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
/*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
DCI, Subtarget))
23776 return SDValue(); // This routine will use CombineTo to replace N.
23782 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23783 /// specific shuffle of a load can be folded into a single element load.
23784 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23785 /// shuffles have been custom lowered so we need to handle those here.
23786 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23787 TargetLowering::DAGCombinerInfo &DCI) {
23788 if (DCI.isBeforeLegalizeOps())
23791 SDValue InVec = N->getOperand(0);
23792 SDValue EltNo = N->getOperand(1);
23794 if (!isa<ConstantSDNode>(EltNo))
23797 EVT OriginalVT = InVec.getValueType();
23799 if (InVec.getOpcode() == ISD::BITCAST) {
23800 // Don't duplicate a load with other uses.
23801 if (!InVec.hasOneUse())
23803 EVT BCVT = InVec.getOperand(0).getValueType();
23804 if (!BCVT.isVector() ||
23805 BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23807 InVec = InVec.getOperand(0);
23810 EVT CurrentVT = InVec.getValueType();
23812 if (!isTargetShuffle(InVec.getOpcode()))
23815 // Don't duplicate a load with other uses.
23816 if (!InVec.hasOneUse())
23819 SmallVector<int, 16> ShuffleMask;
23821 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23822 ShuffleMask, UnaryShuffle))
23825 // Select the input vector, guarding against out of range extract vector.
23826 unsigned NumElems = CurrentVT.getVectorNumElements();
23827 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23828 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23829 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23830 : InVec.getOperand(1);
23832 // If inputs to shuffle are the same for both ops, then allow 2 uses
23833 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23834 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23836 if (LdNode.getOpcode() == ISD::BITCAST) {
23837 // Don't duplicate a load with other uses.
23838 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23841 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23842 LdNode = LdNode.getOperand(0);
23845 if (!ISD::isNormalLoad(LdNode.getNode()))
23848 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
  return SDValue();
23853 EVT EltVT = N->getValueType(0);
23854 // If there's a bitcast before the shuffle, check if the load type and
23855 // alignment is valid.
23856 unsigned Align = LN0->getAlignment();
23857 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23858 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
23859 EltVT.getTypeForEVT(*DAG.getContext()));
23861 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23864 // All checks match so transform back to vector_shuffle so that DAG combiner
23865 // can finish the job
// Create shuffle node taking into account the case that it's a unary shuffle
23869 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23870 : InVec.getOperand(1);
23871 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23872 InVec.getOperand(0), Shuffle,
23874 Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
23875 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23879 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
23880 const X86Subtarget *Subtarget) {
23881 SDValue N0 = N->getOperand(0);
23882 EVT VT = N->getValueType(0);
23884 // Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23885 // special and don't usually play with other vector types, it's better to
23886 // handle them early to be sure we emit efficient code by avoiding
23887 // store-load conversions.
23888 if (VT == MVT::x86mmx && N0.getOpcode() == ISD::BUILD_VECTOR &&
23889 N0.getValueType() == MVT::v2i32 &&
23890 isNullConstant(N0.getOperand(1))) {
23891 SDValue N00 = N0->getOperand(0);
23892 if (N00.getValueType() == MVT::i32)
23893 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(N00), VT, N00);
23896 // Convert a bitcasted integer logic operation that has one bitcasted
23897 // floating-point operand and one constant operand into a floating-point
23898 // logic operation. This may create a load of the constant, but that is
23899 // cheaper than materializing the constant in an integer register and
23900 // transferring it to an SSE register or transferring the SSE operand to
23901 // integer register and back.
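// E.g. (f32 (bitcast (and (bitcast x), C))) becomes
// (X86ISD::FAND x, (f32 (bitcast C))) below, keeping the value in an XMM
// register instead of round-tripping it through a GPR.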
unsigned FPOpcode;
switch (N0.getOpcode()) {
23904 case ISD::AND: FPOpcode = X86ISD::FAND; break;
23905 case ISD::OR: FPOpcode = X86ISD::FOR; break;
23906 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
23907 default: return SDValue();
23909 if (((Subtarget->hasSSE1() && VT == MVT::f32) ||
23910 (Subtarget->hasSSE2() && VT == MVT::f64)) &&
23911 isa<ConstantSDNode>(N0.getOperand(1)) &&
23912 N0.getOperand(0).getOpcode() == ISD::BITCAST &&
23913 N0.getOperand(0).getOperand(0).getValueType() == VT) {
23914 SDValue N000 = N0.getOperand(0).getOperand(0);
23915 SDValue FPConst = DAG.getBitcast(VT, N0.getOperand(1));
23916 return DAG.getNode(FPOpcode, SDLoc(N0), VT, N000, FPConst);
23922 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23923 /// generation and convert it from being a bunch of shuffles and extracts
23924 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23925 /// storing the value and loading scalars back, while for x64 we should
23926 /// use 64-bit extracts and shifts.
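///
/// For example, four sign-extended i32 extracts of a v4i32 become, on
/// x86-64, roughly two i64 element extracts plus a 32-bit arithmetic
/// shift and truncates in the code below.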
23927 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23928 TargetLowering::DAGCombinerInfo &DCI) {
23929 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
23932 SDValue InputVector = N->getOperand(0);
23933 SDLoc dl(InputVector);
23934 // Detect mmx to i32 conversion through a v2i32 elt extract.
23935 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23936 N->getValueType(0) == MVT::i32 &&
23937 InputVector.getValueType() == MVT::v2i32) {
23939 // The bitcast source is a direct mmx result.
23940 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23941 if (MMXSrc.getValueType() == MVT::x86mmx)
23942 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23943 N->getValueType(0),
23944 InputVector.getNode()->getOperand(0));
23946 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23947 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23948 MMXSrc.getValueType() == MVT::i64) {
23949 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23950 if (MMXSrcOp.hasOneUse() && MMXSrcOp.getOpcode() == ISD::BITCAST &&
23951 MMXSrcOp.getValueType() == MVT::v1i64 &&
23952 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23953 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23954 N->getValueType(0), MMXSrcOp.getOperand(0));
23958 EVT VT = N->getValueType(0);
23960 if (VT == MVT::i1 && isa<ConstantSDNode>(N->getOperand(1)) &&
23961 InputVector.getOpcode() == ISD::BITCAST &&
23962 isa<ConstantSDNode>(InputVector.getOperand(0))) {
23963 uint64_t ExtractedElt =
23964 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
23965 uint64_t InputValue =
23966 cast<ConstantSDNode>(InputVector.getOperand(0))->getZExtValue();
23967 uint64_t Res = (InputValue >> ExtractedElt) & 1;
23968 return DAG.getConstant(Res, dl, MVT::i1);
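// E.g. extracting element 3 of (v16i1 (bitcast (i16 8))) reads bit 3 of
// the constant: (8 >> 3) & 1 == 1.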
23970 // Only operate on vectors of 4 elements, where the alternative shuffling
23971 // gets to be more expensive.
23972 if (InputVector.getValueType() != MVT::v4i32)
23975 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
// single use which is a sign-extend or zero-extend, and all elements are
// used.
23978 SmallVector<SDNode *, 4> Uses;
23979 unsigned ExtractedElements = 0;
23980 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23981 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23982 if (UI.getUse().getResNo() != InputVector.getResNo())
23985 SDNode *Extract = *UI;
23986 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23989 if (Extract->getValueType(0) != MVT::i32)
23991 if (!Extract->hasOneUse())
23993 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23994 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23996 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23999 // Record which element was extracted.
24000 ExtractedElements |=
24001 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
24003 Uses.push_back(Extract);
24006 // If not all the elements were used, this may not be worthwhile.
if (ExtractedElements != 15)
  return SDValue();
24010 // Ok, we've now decided to do the transformation.
24011 // If 64-bit shifts are legal, use the extract-shift sequence,
24012 // otherwise bounce the vector off the cache.
24013 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24016 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
24017 SDValue Cst = DAG.getBitcast(MVT::v2i64, InputVector);
24018 auto &DL = DAG.getDataLayout();
24019 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy(DL);
24020 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
24021 DAG.getConstant(0, dl, VecIdxTy));
24022 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
24023 DAG.getConstant(1, dl, VecIdxTy));
24025 SDValue ShAmt = DAG.getConstant(
24026 32, dl, DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64, DL));
24027 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
24028 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
24029 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
24030 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
24031 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
24032 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
24034 // Store the value to a temporary stack slot.
24035 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
24036 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
24037 MachinePointerInfo(), false, false, 0);
24039 EVT ElementType = InputVector.getValueType().getVectorElementType();
24040 unsigned EltSize = ElementType.getSizeInBits() / 8;
24042 // Replace each use (extract) with a load of the appropriate element.
24043 for (unsigned i = 0; i < 4; ++i) {
24044 uint64_t Offset = EltSize * i;
24045 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24046 SDValue OffsetVal = DAG.getConstant(Offset, dl, PtrVT);
24048 SDValue ScalarAddr =
24049 DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, OffsetVal);
24051 // Load the scalar.
24052 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
24053 ScalarAddr, MachinePointerInfo(),
24054 false, false, false, 0);
24059 // Replace the extracts
24060 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
24061 UE = Uses.end(); UI != UE; ++UI) {
24062 SDNode *Extract = *UI;
24064 SDValue Idx = Extract->getOperand(1);
24065 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
24066 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
24069 // The replacement was made in place; don't return anything.
24074 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
24075 const X86Subtarget *Subtarget) {
24077 SDValue Cond = N->getOperand(0);
24078 SDValue LHS = N->getOperand(1);
24079 SDValue RHS = N->getOperand(2);
24081 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
24082 SDValue CondSrc = Cond->getOperand(0);
24083 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
24084 Cond = CondSrc->getOperand(0);
24087 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
24090 // A vselect where all conditions and data are constants can be optimized into
24091 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
24092 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
24093 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
24096 unsigned MaskValue = 0;
24097 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
24100 MVT VT = N->getSimpleValueType(0);
24101 unsigned NumElems = VT.getVectorNumElements();
24102 SmallVector<int, 8> ShuffleMask(NumElems, -1);
24103 for (unsigned i = 0; i < NumElems; ++i) {
24104 // Be sure we emit undef where we can.
24105 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
24106 ShuffleMask[i] = -1;
24108 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
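// E.g. with NumElems == 4 and MaskValue 0b0110, lanes 1 and 2 come from
// RHS and the resulting shuffle mask is <0,5,6,3>.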
24111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
  return SDValue();

return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
24117 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
24119 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
24120 TargetLowering::DAGCombinerInfo &DCI,
24121 const X86Subtarget *Subtarget) {
24123 SDValue Cond = N->getOperand(0);
24124 // Get the LHS/RHS of the select.
24125 SDValue LHS = N->getOperand(1);
24126 SDValue RHS = N->getOperand(2);
24127 EVT VT = LHS.getValueType();
24128 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24130 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
24131 // instructions match the semantics of the common C idiom x<y?x:y but not
24132 // x<=y?x:y, because of how they handle negative zero (which can be
24133 // ignored in unsafe-math mode).
24134 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
24135 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
24136 VT != MVT::f80 && VT != MVT::f128 &&
24137 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
24138 (Subtarget->hasSSE2() ||
24139 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
24140 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24142 unsigned Opcode = 0;
24143 // Check for x CC y ? x : y.
24144 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
24145 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
24149 // Converting this to a min would handle NaNs incorrectly, and swapping
24150 // the operands would cause it to handle comparisons between positive
24151 // and negative zero incorrectly.
24152 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
24153 if (!DAG.getTarget().Options.UnsafeFPMath &&
24154 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
24156 std::swap(LHS, RHS);
24158 Opcode = X86ISD::FMIN;
24161 // Converting this to a min would handle comparisons between positive
24162 // and negative zero incorrectly.
24163 if (!DAG.getTarget().Options.UnsafeFPMath &&
24164 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
24166 Opcode = X86ISD::FMIN;
24169 // Converting this to a min would handle both negative zeros and NaNs
24170 // incorrectly, but we can swap the operands to fix both.
24171 std::swap(LHS, RHS);
24175 Opcode = X86ISD::FMIN;
24179 // Converting this to a max would handle comparisons between positive
24180 // and negative zero incorrectly.
24181 if (!DAG.getTarget().Options.UnsafeFPMath &&
24182 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
24184 Opcode = X86ISD::FMAX;
24187 // Converting this to a max would handle NaNs incorrectly, and swapping
24188 // the operands would cause it to handle comparisons between positive
24189 // and negative zero incorrectly.
24190 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
24191 if (!DAG.getTarget().Options.UnsafeFPMath &&
24192 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
24194 std::swap(LHS, RHS);
24196 Opcode = X86ISD::FMAX;
24199 // Converting this to a max would handle both negative zeros and NaNs
24200 // incorrectly, but we can swap the operands to fix both.
24201 std::swap(LHS, RHS);
24205 Opcode = X86ISD::FMAX;
24208 // Check for x CC y ? y : x -- a min/max with reversed arms.
24209 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
24210 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
24214 // Converting this to a min would handle comparisons between positive
24215 // and negative zero incorrectly, and swapping the operands would
24216 // cause it to handle NaNs incorrectly.
24217 if (!DAG.getTarget().Options.UnsafeFPMath &&
24218 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
24219 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
24221 std::swap(LHS, RHS);
24223 Opcode = X86ISD::FMIN;
24226 // Converting this to a min would handle NaNs incorrectly.
24227 if (!DAG.getTarget().Options.UnsafeFPMath &&
24228 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
24230 Opcode = X86ISD::FMIN;
24233 // Converting this to a min would handle both negative zeros and NaNs
24234 // incorrectly, but we can swap the operands to fix both.
24235 std::swap(LHS, RHS);
24239 Opcode = X86ISD::FMIN;
24243 // Converting this to a max would handle NaNs incorrectly.
24244 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
24246 Opcode = X86ISD::FMAX;
24249 // Converting this to a max would handle comparisons between positive
24250 // and negative zero incorrectly, and swapping the operands would
24251 // cause it to handle NaNs incorrectly.
24252 if (!DAG.getTarget().Options.UnsafeFPMath &&
24253 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
24254 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
24256 std::swap(LHS, RHS);
24258 Opcode = X86ISD::FMAX;
24261 // Converting this to a max would handle both negative zeros and NaNs
24262 // incorrectly, but we can swap the operands to fix both.
24263 std::swap(LHS, RHS);
24267 Opcode = X86ISD::FMAX;
24273 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
24276 EVT CondVT = Cond.getValueType();
24277 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
24278 CondVT.getVectorElementType() == MVT::i1) {
24279 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
24280 // lowering on KNL. In this case we convert it to
24281 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
24282 // The same situation for all 128 and 256-bit vectors of i8 and i16.
24283 // Since SKX these selects have a proper lowering.
24284 EVT OpVT = LHS.getValueType();
24285 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
24286 (OpVT.getVectorElementType() == MVT::i8 ||
24287 OpVT.getVectorElementType() == MVT::i16) &&
24288 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
24289 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
24290 DCI.AddToWorklist(Cond.getNode());
24291 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
24294 // If this is a select between two integer constants, try to do some
24296 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
24297 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
24298 // Don't do this for crazy integer types.
24299 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
24300 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
24301 // so that TrueC (the true value) is larger than FalseC.
24302 bool NeedsCondInvert = false;
24304 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
24305 // Efficiently invertible.
24306 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
24307 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
24308 isa<ConstantSDNode>(Cond.getOperand(1))))) {
24309 NeedsCondInvert = true;
24310 std::swap(TrueC, FalseC);
24313 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
24314 if (FalseC->getAPIntValue() == 0 &&
24315 TrueC->getAPIntValue().isPowerOf2()) {
24316 if (NeedsCondInvert) // Invert the condition if needed.
24317 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
24318 DAG.getConstant(1, DL, Cond.getValueType()));
24320 // Zero extend the condition if needed.
24321 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
24323 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24324 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
24325 DAG.getConstant(ShAmt, DL, MVT::i8));
24328 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst.
24329 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24330 if (NeedsCondInvert) // Invert the condition if needed.
24331 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
24332 DAG.getConstant(1, DL, Cond.getValueType()));
24334 // Zero extend the condition if needed.
24335 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24336 FalseC->getValueType(0), Cond);
24337 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24338 SDValue(FalseC, 0));
24341 // Optimize cases that will turn into an LEA instruction. This requires
24342 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24343 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24344 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24345 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24347 bool isFastMultiplier = false;
24349 switch ((unsigned char)Diff) {
24351 case 1: // result = add base, cond
24352 case 2: // result = lea base( , cond*2)
24353 case 3: // result = lea base(cond, cond*2)
24354 case 4: // result = lea base( , cond*4)
24355 case 5: // result = lea base(cond, cond*4)
24356 case 8: // result = lea base( , cond*8)
24357 case 9: // result = lea base(cond, cond*8)
24358 isFastMultiplier = true;
24363 if (isFastMultiplier) {
24364 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24365 if (NeedsCondInvert) // Invert the condition if needed.
24366 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
24367 DAG.getConstant(1, DL, Cond.getValueType()));
24369 // Zero extend the condition if needed.
24370 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24372 // Scale the condition by the difference.
24374 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24375 DAG.getConstant(Diff, DL,
24376 Cond.getValueType()));
24378 // Add the base if non-zero.
24379 if (FalseC->getAPIntValue() != 0)
24380 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24381 SDValue(FalseC, 0));
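// For example, select(Cond, 5, 2) has Diff = 3, a fast multiplier, so this
// emits (add (mul (zext Cond), 3), 2); address-mode matching can then fold
// the multiply and add into a single LEA of the lea base(cond, cond*2) form.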
24388 // Canonicalize max and min:
24389 // (x > y) ? x : y -> (x >= y) ? x : y
24390 // (x < y) ? x : y -> (x <= y) ? x : y
24391 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
24392 // the need for an extra compare
24393 // against zero. e.g.
24394 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
24395 // subl   %esi, %edi
24396 // testl  %edi, %edi
24397 // movl   $0, %eax
24398 // cmovgl %edi, %eax
24399 // =>
24400 // xorl   %eax, %eax
24401 // subl   %esi, %edi
24402 // cmovsl %eax, %edi
24403 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
24404 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
24405 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
24406 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24411 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
24412 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
24413 Cond.getOperand(0), Cond.getOperand(1), NewCC);
24414 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
24419 // Early exit check
24420 if (!TLI.isTypeLegal(VT))
24423 // Match VSELECTs into subs with unsigned saturation.
24424 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
24425 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
24426 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
24427 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
24428 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24430 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
24431 // left side invert the predicate to simplify logic below.
24432 SDValue Other;
24433 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
24434 Other = RHS;
24435 CC = ISD::getSetCCInverse(CC, true);
24436 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
24437 Other = LHS;
24438 }
24440 if (Other.getNode() && Other->getNumOperands() == 2 &&
24441 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
24442 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
24443 SDValue CondRHS = Cond->getOperand(1);
24445 // Look for a general sub with unsigned saturation first.
24446 // x >= y ? x-y : 0 --> subus x, y
24447 // x > y ? x-y : 0 --> subus x, y
24448 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
24449 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
24450 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
24452 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
24453 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
24454 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
24455 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
24456 // If the RHS is a constant we have to reverse the const
24457 // canonicalization.
24458 // x > C-1 ? x+-C : 0 --> subus x, C
24459 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
24460 CondRHSConst->getAPIntValue() ==
24461 (-OpRHSConst->getAPIntValue() - 1))
24462 return DAG.getNode(
24463 X86ISD::SUBUS, DL, VT, OpLHS,
24464 DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT));
24466 // Another special case: If C was a sign bit, the sub has been
24467 // canonicalized into a xor.
24468 // FIXME: Would it be better to use computeKnownBits to determine
24469 // whether it's safe to decanonicalize the xor?
24470 // x s< 0 ? x^C : 0 --> subus x, C
24471 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
24472 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
24473 OpRHSConst->getAPIntValue().isSignBit())
24474 // Note that we have to rebuild the RHS constant here to ensure we
24475 // don't rely on particular values of undef lanes.
24476 return DAG.getNode(
24477 X86ISD::SUBUS, DL, VT, OpLHS,
24478 DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT));
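// For example, with v16i8 and C = 0x80 (the sign bit), the canonicalized
// (vselect (setlt x, 0), (xor x, 0x80), 0) becomes (X86ISD::SUBUS x, 0x80):
// when x s< 0, x ^ 0x80 == x - 0x80, and otherwise the saturating subtract
// yields the required 0.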
24483 // Simplify vector selection if condition value type matches vselect
24484 // operand type.
24485 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
24486 assert(Cond.getValueType().isVector() &&
24487 "vector select expects a vector selector!");
24489 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
24490 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
24492 // Try to invert the condition if the true value is not all 1s and the
24493 // false value is not all 0s.
24494 if (!TValIsAllOnes && !FValIsAllZeros &&
24495 // Check if the selector will be produced by CMPP*/PCMP*
24496 Cond.getOpcode() == ISD::SETCC &&
24497 // Check if SETCC has already been promoted
24498 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
24499 CondVT) {
24500 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
24501 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
24503 if (TValIsAllZeros || FValIsAllOnes) {
24504 SDValue CC = Cond.getOperand(2);
24505 ISD::CondCode NewCC =
24506 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
24507 Cond.getOperand(0).getValueType().isInteger());
24508 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
24509 std::swap(LHS, RHS);
24510 TValIsAllOnes = FValIsAllOnes;
24511 FValIsAllZeros = TValIsAllZeros;
24515 if (TValIsAllOnes || FValIsAllZeros) {
24516 SDValue Ret;
24518 if (TValIsAllOnes && FValIsAllZeros)
24519 Ret = Cond;
24520 else if (TValIsAllOnes)
24521 Ret =
24522 DAG.getNode(ISD::OR, DL, CondVT, Cond, DAG.getBitcast(CondVT, RHS));
24523 else if (FValIsAllZeros)
24524 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
24525 DAG.getBitcast(CondVT, LHS));
24527 return DAG.getBitcast(VT, Ret);
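// For example, (vselect M, all-ones, x) has been folded to (or M, x) and
// (vselect M, x, all-zeros) to (and M, x) above, which is valid because M is
// known to be all-ones or all-zeros per lane.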
24531 // We should generate an X86ISD::BLENDI from a vselect if its argument
24532 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
24533 // constants. This specific pattern gets generated when we split a
24534 // selector for a 512 bit vector in a machine without AVX512 (but with
24535 // 256-bit vectors), during legalization:
24537 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24539 // Iff we find this pattern and the build_vectors are built from
24540 // constants, we translate the vselect into a shuffle_vector that we
24541 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
24542 if ((N->getOpcode() == ISD::VSELECT ||
24543 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24544 !DCI.isBeforeLegalize() && !VT.is512BitVector()) {
24545 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24546 if (Shuffle.getNode())
24550 // If this is a *dynamic* select (non-constant condition) and we can match
24551 // this node with one of the variable blend instructions, restructure the
24552 // condition so that the blends can use the high bit of each element and use
24553 // SimplifyDemandedBits to simplify the condition operand.
24554 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
24555 !DCI.isBeforeLegalize() &&
24556 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
24557 unsigned BitWidth = Cond.getValueType().getScalarSizeInBits();
24559 // Don't optimize vector selects that map to mask-registers.
24563 // We can only handle the cases where VSELECT is directly legal on the
24564 // subtarget. We custom lower VSELECT nodes with constant conditions and
24565 // this makes it hard to see whether a dynamic VSELECT will correctly
24566 // lower, so we both check the operation's status and explicitly handle the
24567 // cases where a *dynamic* blend will fail even though a constant-condition
24568 // blend could be custom lowered.
24569 // FIXME: We should find a better way to handle this class of problems.
24570 // Potentially, we should combine constant-condition vselect nodes
24571 // pre-legalization into shuffles and not mark as many types as custom
24572 // lowered.
24573 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
24575 // FIXME: We don't support i16-element blends currently. We could and
24576 // should support them by making *all* the bits in the condition be set
24577 // rather than just the high bit and using an i8-element blend.
24578 if (VT.getVectorElementType() == MVT::i16)
24580 // Dynamic blending was only available from SSE4.1 onward.
24581 if (VT.is128BitVector() && !Subtarget->hasSSE41())
24583 // Byte blends are only available in AVX2
24584 if (VT == MVT::v32i8 && !Subtarget->hasAVX2())
24587 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
24588 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
24590 APInt KnownZero, KnownOne;
24591 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
24592 DCI.isBeforeLegalizeOps());
24593 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
24594 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
24596 // If we changed the computation somewhere in the DAG, this change
24597 // will affect all users of Cond.
24598 // Make sure it is fine and update all the nodes so that we do not
24599 // use the generic VSELECT anymore. Otherwise, we may perform
24600 // wrong optimizations as we messed up with the actual expectation
24601 // for the vector boolean values.
24602 if (Cond != TLO.Old) {
24603 // Check all uses of that condition operand to check whether it will be
24604 // consumed by non-BLEND instructions, which may depend on all bits being
24605 // set properly.
24606 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24607 I != E; ++I)
24608 if (I->getOpcode() != ISD::VSELECT)
24609 // TODO: Add other opcodes eventually lowered into BLEND.
24610 return SDValue();
24612 // Update all the users of the condition, before committing the change,
24613 // so that the VSELECT optimizations that expect the correct vector
24614 // boolean value will not be triggered.
24615 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24616 I != E; ++I)
24617 DAG.ReplaceAllUsesOfValueWith(
24618 SDValue(*I, 0),
24619 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
24620 Cond, I->getOperand(1), I->getOperand(2)));
24621 DCI.CommitTargetLoweringOpt(TLO);
24624 // At this point, only Cond is changed. Change the condition
24625 // just for N to keep the opportunity to optimize all other
24626 // users their own way.
24627 DAG.ReplaceAllUsesOfValueWith(
24628 SDValue(N, 0),
24629 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
24630 TLO.New, N->getOperand(1), N->getOperand(2)));
24638 // Check whether a boolean test is testing a boolean value generated by
24639 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
24640 // flag.
24642 // Simplify the following patterns:
24643 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24644 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24645 // to (Op EFLAGS Cond)
24647 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24648 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24649 // to (Op EFLAGS !Cond)
24651 // where Op could be BRCOND or CMOV.
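// For example, (brcond (cmp (setcc COND_L, EFLAGS), 0), NE) asks whether the
// setcc produced 1, so it simplifies to (brcond EFLAGS, COND_L) and the
// intermediate i8 flag materialization goes away.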
24653 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24654 // Quit if not CMP and SUB with its value result used.
24655 if (Cmp.getOpcode() != X86ISD::CMP &&
24656 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24659 // Quit if not used as a boolean value.
24660 if (CC != X86::COND_E && CC != X86::COND_NE)
24663 // Check CMP operands. One of them should be 0 or 1 and the other should be
24664 // a SetCC or extended from it.
24665 SDValue Op1 = Cmp.getOperand(0);
24666 SDValue Op2 = Cmp.getOperand(1);
24669 const ConstantSDNode* C = nullptr;
24670 bool needOppositeCond = (CC == X86::COND_E);
24671 bool checkAgainstTrue = false; // Is it a comparison against 1?
24673 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24675 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24677 else // Quit if all operands are not constants.
24680 if (C->getZExtValue() == 1) {
24681 needOppositeCond = !needOppositeCond;
24682 checkAgainstTrue = true;
24683 } else if (C->getZExtValue() != 0)
24684 // Quit if the constant is neither 0 nor 1.
24687 bool truncatedToBoolWithAnd = false;
24688 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24689 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24690 SetCC.getOpcode() == ISD::TRUNCATE ||
24691 SetCC.getOpcode() == ISD::AND) {
24692 if (SetCC.getOpcode() == ISD::AND) {
24693 int OpIdx = -1;
24694 if (isOneConstant(SetCC.getOperand(0)))
24695 OpIdx = 1;
24696 if (isOneConstant(SetCC.getOperand(1)))
24697 OpIdx = 0;
24698 if (OpIdx == -1)
24699 break;
24700 SetCC = SetCC.getOperand(OpIdx);
24701 truncatedToBoolWithAnd = true;
24702 } else
24703 SetCC = SetCC.getOperand(0);
24706 switch (SetCC.getOpcode()) {
24707 case X86ISD::SETCC_CARRY:
24708 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24709 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24710 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24711 // truncated to i1 using 'and'.
24712 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24714 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24715 "Invalid use of SETCC_CARRY!");
24717 case X86ISD::SETCC:
24718 // Set the condition code or opposite one if necessary.
24719 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24720 if (needOppositeCond)
24721 CC = X86::GetOppositeBranchCondition(CC);
24722 return SetCC.getOperand(1);
24723 case X86ISD::CMOV: {
24724 // Check whether false/true value has canonical one, i.e. 0 or 1.
24725 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24726 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24727 // Quit if true value is not a constant.
24728 if (!TVal)
24729 return SDValue();
24730 // Quit if false value is not a constant.
24731 if (!FVal) {
24732 SDValue Op = SetCC.getOperand(0);
24733 // Skip 'zext' or 'trunc' node.
24734 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24735 Op.getOpcode() == ISD::TRUNCATE)
24736 Op = Op.getOperand(0);
24737 // A special case for rdrand/rdseed, where 0 is set if false cond is
24738 // found.
24739 if ((Op.getOpcode() != X86ISD::RDRAND &&
24740 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24743 // Quit if false value is not the constant 0 or 1.
24744 bool FValIsFalse = true;
24745 if (FVal && FVal->getZExtValue() != 0) {
24746 if (FVal->getZExtValue() != 1)
24748 // If FVal is 1, opposite cond is needed.
24749 needOppositeCond = !needOppositeCond;
24750 FValIsFalse = false;
24752 // Quit if TVal is not the constant opposite of FVal.
24753 if (FValIsFalse && TVal->getZExtValue() != 1)
24755 if (!FValIsFalse && TVal->getZExtValue() != 0)
24757 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24758 if (needOppositeCond)
24759 CC = X86::GetOppositeBranchCondition(CC);
24760 return SetCC.getOperand(3);
24767 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
24768 /// Match:
24769 /// (X86or (X86setcc) (X86setcc))
24770 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
24771 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
24772 X86::CondCode &CC1, SDValue &Flags,
24773 bool &isAnd) {
24774 if (Cond->getOpcode() == X86ISD::CMP) {
24775 if (!isNullConstant(Cond->getOperand(1)))
24778 Cond = Cond->getOperand(0);
24783 SDValue SetCC0, SetCC1;
24784 switch (Cond->getOpcode()) {
24785 default: return false;
24786 case ISD::AND:
24787 case X86ISD::AND:
24788 isAnd = true;
24789 // fallthrough
24790 case ISD::OR:
24791 case X86ISD::OR:
24792 SetCC0 = Cond->getOperand(0);
24793 SetCC1 = Cond->getOperand(1);
24797 // Make sure we have SETCC nodes, using the same flags value.
24798 if (SetCC0.getOpcode() != X86ISD::SETCC ||
24799 SetCC1.getOpcode() != X86ISD::SETCC ||
24800 SetCC0->getOperand(1) != SetCC1->getOperand(1))
24803 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
24804 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
24805 Flags = SetCC0->getOperand(1);
24809 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24810 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24811 TargetLowering::DAGCombinerInfo &DCI,
24812 const X86Subtarget *Subtarget) {
24815 // If the flag operand isn't dead, don't touch this CMOV.
24816 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24819 SDValue FalseOp = N->getOperand(0);
24820 SDValue TrueOp = N->getOperand(1);
24821 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24822 SDValue Cond = N->getOperand(3);
24824 if (CC == X86::COND_E || CC == X86::COND_NE) {
24825 switch (Cond.getOpcode()) {
24826 default: break;
24827 case X86ISD::BSR:
24828 case X86ISD::BSF:
24829 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24830 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24831 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24835 SDValue Flags;
24837 Flags = checkBoolTestSetCCCombine(Cond, CC);
24838 if (Flags.getNode() &&
24839 // Extra check as FCMOV only supports a subset of X86 cond.
24840 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24841 SDValue Ops[] = { FalseOp, TrueOp,
24842 DAG.getConstant(CC, DL, MVT::i8), Flags };
24843 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24846 // If this is a select between two integer constants, try to do some
24847 // optimizations. Note that the operands are ordered the opposite of SELECT
24848 // operands.
24849 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24850 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24851 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24852 // larger than FalseC (the false value).
24853 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24854 CC = X86::GetOppositeBranchCondition(CC);
24855 std::swap(TrueC, FalseC);
24856 std::swap(TrueOp, FalseOp);
24859 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24860 // This is efficient for any integer data type (including i8/i16) and
24861 // value.
24862 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24863 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24864 DAG.getConstant(CC, DL, MVT::i8), Cond);
24866 // Zero extend the condition if needed.
24867 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24869 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24870 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24871 DAG.getConstant(ShAmt, DL, MVT::i8));
24872 if (N->getNumValues() == 2) // Dead flag value?
24873 return DCI.CombineTo(N, Cond, SDValue());
24877 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
24878 // for any integer data type, including i8/i16.
24879 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24880 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24881 DAG.getConstant(CC, DL, MVT::i8), Cond);
24883 // Zero extend the condition if needed.
24884 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24885 FalseC->getValueType(0), Cond);
24886 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24887 SDValue(FalseC, 0));
24889 if (N->getNumValues() == 2) // Dead flag value?
24890 return DCI.CombineTo(N, Cond, SDValue());
24894 // Optimize cases that will turn into an LEA instruction. This requires
24895 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24896 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24897 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24898 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24900 bool isFastMultiplier = false;
24902 switch ((unsigned char)Diff) {
24903 default: break;
24904 case 1: // result = add base, cond
24905 case 2: // result = lea base( , cond*2)
24906 case 3: // result = lea base(cond, cond*2)
24907 case 4: // result = lea base( , cond*4)
24908 case 5: // result = lea base(cond, cond*4)
24909 case 8: // result = lea base( , cond*8)
24910 case 9: // result = lea base(cond, cond*8)
24911 isFastMultiplier = true;
24916 if (isFastMultiplier) {
24917 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24918 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24919 DAG.getConstant(CC, DL, MVT::i8), Cond);
24920 // Zero extend the condition if needed.
24921 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24922 Cond);
24923 // Scale the condition by the difference.
24925 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24926 DAG.getConstant(Diff, DL, Cond.getValueType()));
24928 // Add the base if non-zero.
24929 if (FalseC->getAPIntValue() != 0)
24930 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24931 SDValue(FalseC, 0));
24932 if (N->getNumValues() == 2) // Dead flag value?
24933 return DCI.CombineTo(N, Cond, SDValue());
24940 // Handle these cases:
24941 // (select (x != c), e, c) -> select (x != c), e, x),
24942 // (select (x == c), c, e) -> select (x == c), x, e)
24943 // where c is an integer constant, and the "select" is the combination
24944 // of CMOV and CMP.
24946 // The rationale for this change is that a conditional-move from a constant
24947 // needs two instructions; a conditional-move from a register needs
24948 // only one instruction.
24950 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24951 // some instruction-combining opportunities. This opt needs to be
24952 // postponed as late as possible.
24954 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24955 // The DCI.xxxx conditions are provided to postpone the optimization as
24956 // late as possible.
24958 ConstantSDNode *CmpAgainst = nullptr;
24959 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24960 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24961 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24963 if (CC == X86::COND_NE &&
24964 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24965 CC = X86::GetOppositeBranchCondition(CC);
24966 std::swap(TrueOp, FalseOp);
24969 if (CC == X86::COND_E &&
24970 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24971 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24972 DAG.getConstant(CC, DL, MVT::i8), Cond };
24973 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24978 // Fold and/or of setcc's to double CMOV:
24979 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
24980 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
24982 // This combine lets us generate:
24983 // cmovcc1 (jcc1 if we don't have CMOV)
24984 // cmovcc2 (same)
24985 // instead of:
24986 // setcc1
24987 // setcc2
24988 // and/or
24989 // cmovne (jne if we don't have CMOV)
24990 // When we can't use the CMOV instruction, it might increase branch
24991 // mispredicts.
24992 // When we can use CMOV, or when there is no mispredict, this improves
24993 // throughput and reduces register pressure.
24995 if (CC == X86::COND_NE) {
24996 SDValue Flags;
24997 X86::CondCode CC0, CC1;
24998 bool isAndSetCC;
24999 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
25000 if (isAndSetCC) {
25001 std::swap(FalseOp, TrueOp);
25002 CC0 = X86::GetOppositeBranchCondition(CC0);
25003 CC1 = X86::GetOppositeBranchCondition(CC1);
25006 SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
25007 Flags};
25008 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), LOps);
25009 SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
25010 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
25011 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(CMOV.getNode(), 1));
25019 /// PerformMulCombine - Optimize a single multiply with constant into two
25020 /// multiplies in order to implement it with two cheaper instructions, e.g.
25021 /// LEA + SHL, LEA + LEA.
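/// For example, mul x, 45 decomposes as (x * 9) * 5; both factors are fast
/// multipliers, so this can lower to two LEAs, e.g. lea (x,x,8) followed by
/// lea (t,t,4).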
25022 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
25023 TargetLowering::DAGCombinerInfo &DCI) {
25024 // An imul is usually smaller than the alternative sequence.
25025 if (DAG.getMachineFunction().getFunction()->optForMinSize())
25028 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
25031 EVT VT = N->getValueType(0);
25032 if (VT != MVT::i64 && VT != MVT::i32)
25035 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
25038 uint64_t MulAmt = C->getZExtValue();
25039 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
25042 uint64_t MulAmt1 = 0;
25043 uint64_t MulAmt2 = 0;
25044 if ((MulAmt % 9) == 0) {
25045 MulAmt1 = 9;
25046 MulAmt2 = MulAmt / 9;
25047 } else if ((MulAmt % 5) == 0) {
25048 MulAmt1 = 5;
25049 MulAmt2 = MulAmt / 5;
25050 } else if ((MulAmt % 3) == 0) {
25051 MulAmt1 = 3;
25052 MulAmt2 = MulAmt / 3;
25053 }
25055 SDLoc DL(N);
25056 SDValue NewMul;
25057 if (MulAmt2 &&
25058 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
25060 if (isPowerOf2_64(MulAmt2) &&
25061 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
25062 // If second multiplier is pow2, issue it first. We want the multiply by
25063 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
25064 // is an add.
25065 std::swap(MulAmt1, MulAmt2);
25067 if (isPowerOf2_64(MulAmt1))
25068 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
25069 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
25070 else
25071 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
25072 DAG.getConstant(MulAmt1, DL, VT));
25074 if (isPowerOf2_64(MulAmt2))
25075 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
25076 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
25077 else
25078 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
25079 DAG.getConstant(MulAmt2, DL, VT));
25083 assert(MulAmt != 0 && MulAmt != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX)
25084 && "Both cases that could cause potential overflows should have "
25085 "already been handled.");
25086 if (isPowerOf2_64(MulAmt - 1))
25087 // (mul x, 2^N + 1) => (add (shl x, N), x)
25088 NewMul = DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
25089 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
25090 DAG.getConstant(Log2_64(MulAmt - 1), DL,
25091 MVT::i8)));
25093 else if (isPowerOf2_64(MulAmt + 1))
25094 // (mul x, 2^N - 1) => (sub (shl x, N), x)
25095 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getNode(ISD::SHL, DL, VT,
25096 N->getOperand(0),
25097 DAG.getConstant(Log2_64(MulAmt + 1),
25098 DL, MVT::i8)), N->getOperand(0));
25101 if (NewMul)
25102 // Do not add new nodes to DAG combiner worklist.
25103 DCI.CombineTo(N, NewMul, false);
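// For example, mul x, 17 becomes (add (shl x, 4), x) and mul x, 31 becomes
// (sub (shl x, 5), x), trading the multiply for a shift plus one ALU op.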
25108 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
25109 SDValue N0 = N->getOperand(0);
25110 SDValue N1 = N->getOperand(1);
25111 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
25112 EVT VT = N0.getValueType();
25114 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
25115 // since the result of setcc_c is all zero's or all ones.
25116 if (VT.isInteger() && !VT.isVector() &&
25117 N1C && N0.getOpcode() == ISD::AND &&
25118 N0.getOperand(1).getOpcode() == ISD::Constant) {
25119 SDValue N00 = N0.getOperand(0);
25120 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
25121 APInt ShAmt = N1C->getAPIntValue();
25122 Mask = Mask.shl(ShAmt);
25123 bool MaskOK = false;
25124 // We can handle cases concerning bit-widening nodes containing setcc_c if
25125 // we carefully interrogate the mask to make sure we are semantics
25126 // preserving.
25127 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
25128 // of the underlying setcc_c operation if the setcc_c was zero extended.
25129 // Consider the following example:
25130 // zext(setcc_c) -> i32 0x0000FFFF
25131 // c1 -> i32 0x0000FFFF
25132 // c2 -> i32 0x00000001
25133 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
25134 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
25135 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25136 MaskOK = true;
25137 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
25138 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
25139 MaskOK = true;
25140 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
25141 N00.getOpcode() == ISD::ANY_EXTEND) &&
25142 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
25143 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
25145 if (MaskOK && Mask != 0) {
25146 SDLoc DL(N);
25147 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
25151 // Hardware support for vector shifts is sparse which makes us scalarize the
25152 // vector operations in many cases. Also, on sandybridge ADD is faster than
25153 // Shl.
25154 // (shl V, 1) -> add V,V
25155 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
25156 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
25157 assert(N0.getValueType().isVector() && "Invalid vector shift type");
25158 // We shift all of the values by one. In many cases we do not have
25159 // hardware support for this operation. This is better expressed as an ADD
25160 // of two values.
25161 if (N1SplatC->getAPIntValue() == 1)
25162 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
25168 static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
25169 SDValue N0 = N->getOperand(0);
25170 SDValue N1 = N->getOperand(1);
25171 EVT VT = N0.getValueType();
25172 unsigned Size = VT.getSizeInBits();
25174 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
25175 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
25176 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
25177 // depending on sign of (SarConst - [56,48,32,24,16])
25179 // sexts in X86 are MOVs. The MOVs have the same code size
25180 // as above SHIFTs (only a SHIFT by 1 has lower code size).
25181 // However the MOVs have two advantages over a SHIFT:
25182 // 1. MOVs can write to a register that differs from source
25183 // 2. MOVs accept memory operands
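// For example, on i64 (sra (shl x, 56), 58) becomes
// (sra (sext_in_reg x, i8), 2): the sign-extension of the low byte is done
// by a MOV-class instruction and only the residual shift by 2 remains.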
25185 if (!VT.isInteger() || VT.isVector() || N1.getOpcode() != ISD::Constant ||
25186 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
25187 N0.getOperand(1).getOpcode() != ISD::Constant)
25190 SDValue N00 = N0.getOperand(0);
25191 SDValue N01 = N0.getOperand(1);
25192 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
25193 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
25194 EVT CVT = N1.getValueType();
25196 if (SarConst.isNegative())
25199 for (MVT SVT : MVT::integer_valuetypes()) {
25200 unsigned ShiftSize = SVT.getSizeInBits();
25201 // skipping types without corresponding sext/zext and
25202 // ShlConst that is not one of [56,48,32,24,16]
25203 if (ShiftSize < 8 || ShiftSize > 64 || ShlConst != Size - ShiftSize)
25204 continue;
25205 SDLoc DL(N);
25206 SDValue NN =
25207 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
25208 SarConst = SarConst - (Size - ShiftSize);
25209 if (SarConst == 0)
25210 return NN;
25211 else if (SarConst.isNegative())
25212 return DAG.getNode(ISD::SHL, DL, VT, NN,
25213 DAG.getConstant(-SarConst, DL, CVT));
25215 return DAG.getNode(ISD::SRA, DL, VT, NN,
25216 DAG.getConstant(SarConst, DL, CVT));
25221 /// \brief Returns a vector of 0s if the node in input is a vector logical
25222 /// shift by a constant amount which is known to be bigger than or equal
25223 /// to the vector element size in bits.
25224 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
25225 const X86Subtarget *Subtarget) {
25226 EVT VT = N->getValueType(0);
25228 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
25229 (!Subtarget->hasInt256() ||
25230 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
25233 SDValue Amt = N->getOperand(1);
25235 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
25236 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
25237 APInt ShiftAmt = AmtSplat->getAPIntValue();
25238 unsigned MaxAmount =
25239 VT.getSimpleVT().getVectorElementType().getSizeInBits();
25241 // SSE2/AVX2 logical shifts always return a vector of 0s
25242 // if the shift amount is bigger than or equal to
25243 // the element size. The constant shift amount will be
25244 // encoded as an 8-bit immediate.
25245 if (ShiftAmt.trunc(8).uge(MaxAmount))
25246 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, DL);
25252 /// PerformShiftCombine - Combine shifts.
25253 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
25254 TargetLowering::DAGCombinerInfo &DCI,
25255 const X86Subtarget *Subtarget) {
25256 if (N->getOpcode() == ISD::SHL)
25257 if (SDValue V = PerformSHLCombine(N, DAG))
25260 if (N->getOpcode() == ISD::SRA)
25261 if (SDValue V = PerformSRACombine(N, DAG))
25264 // Try to fold this logical shift into a zero vector.
25265 if (N->getOpcode() != ISD::SRA)
25266 if (SDValue V = performShiftToAllZeros(N, DAG, Subtarget))
25272 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
25273 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
25274 // and friends. Likewise for OR -> CMPNEQSS.
25275 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
25276 TargetLowering::DAGCombinerInfo &DCI,
25277 const X86Subtarget *Subtarget) {
25278 unsigned opcode;
25280 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
25281 // we're requiring SSE2 for both.
25282 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
25283 SDValue N0 = N->getOperand(0);
25284 SDValue N1 = N->getOperand(1);
25285 SDValue CMP0 = N0->getOperand(1);
25286 SDValue CMP1 = N1->getOperand(1);
25289 // The SETCCs should both refer to the same CMP.
25290 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
25293 SDValue CMP00 = CMP0->getOperand(0);
25294 SDValue CMP01 = CMP0->getOperand(1);
25295 EVT VT = CMP00.getValueType();
25297 if (VT == MVT::f32 || VT == MVT::f64) {
25298 bool ExpectingFlags = false;
25299 // Check for any users that want flags:
25300 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
25301 !ExpectingFlags && UI != UE; ++UI)
25302 switch (UI->getOpcode()) {
25303 default:
25304 case ISD::BR_CC:
25305 case ISD::BRCOND:
25306 case ISD::SELECT:
25307 ExpectingFlags = true;
25308 break;
25309 case ISD::CopyToReg:
25310 case ISD::SIGN_EXTEND:
25311 case ISD::ZERO_EXTEND:
25312 case ISD::ANY_EXTEND:
25316 if (!ExpectingFlags) {
25317 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
25318 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
25320 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
25321 X86::CondCode tmp = cc0;
25322 cc0 = cc1;
25323 cc1 = tmp;
25324 }
25326 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
25327 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
25328 // FIXME: need symbolic constants for these magic numbers.
25329 // See X86ATTInstPrinter.cpp:printSSECC().
25330 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
25331 if (Subtarget->hasAVX512()) {
25332 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
25333 CMP01,
25334 DAG.getConstant(x86cc, DL, MVT::i8));
25335 if (N->getValueType(0) != MVT::i1)
25336 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
25337 FSetCC);
25338 return FSetCC;
25340 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
25341 CMP00.getValueType(), CMP00, CMP01,
25342 DAG.getConstant(x86cc, DL,
25343 MVT::i8));
25345 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
25346 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
25348 if (is64BitFP && !Subtarget->is64Bit()) {
25349 // On a 32-bit target, we cannot bitcast the 64-bit float to a
25350 // 64-bit integer, since that's not a legal type. Since
25351 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
25352 // bits, but can do this little dance to extract the lowest 32 bits
25353 // and work with those going forward.
25354 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
25355 OnesOrZeroesF);
25356 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
25357 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
25358 Vector32, DAG.getIntPtrConstant(0, DL));
25362 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
25363 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
25364 DAG.getConstant(1, DL, IntVT));
25365 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
25366 ANDed);
25367 return OneBitOfTruth;
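// For example, (and (setcc_e (cmp a, b)), (setcc_np (cmp a, b))) for f32
// operands becomes one CMPEQSS producing an all-ones/all-zeros mask, from
// which a single bit is extracted, instead of two flag tests plus an AND.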
25375 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
25376 /// so it can be folded inside ANDNP.
25377 static bool CanFoldXORWithAllOnes(const SDNode *N) {
25378 EVT VT = N->getValueType(0);
25380 // Match direct AllOnes for 128 and 256-bit vectors
25381 if (ISD::isBuildVectorAllOnes(N))
25384 // Look through a bit convert.
25385 if (N->getOpcode() == ISD::BITCAST)
25386 N = N->getOperand(0).getNode();
25388 // Sometimes the operand may come from an insert_subvector building a 256-bit
25389 // allones vector.
25390 if (VT.is256BitVector() &&
25391 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
25392 SDValue V1 = N->getOperand(0);
25393 SDValue V2 = N->getOperand(1);
25395 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
25396 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
25397 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
25398 ISD::isBuildVectorAllOnes(V2.getNode()))
25405 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
25406 // register. In most cases we actually compare or select YMM-sized registers
25407 // and mixing the two types creates horrible code. This method optimizes
25408 // some of the transition sequences.
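// For example, (sext (and (trunc x), (trunc y))) with 256-bit x and y is
// rewritten below as sign_extend_inreg of (and x, y), so the AND executes
// once on the wide type instead of forcing both operands through XMM.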
25409 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
25410 TargetLowering::DAGCombinerInfo &DCI,
25411 const X86Subtarget *Subtarget) {
25412 EVT VT = N->getValueType(0);
25413 if (!VT.is256BitVector())
25416 assert((N->getOpcode() == ISD::ANY_EXTEND ||
25417 N->getOpcode() == ISD::ZERO_EXTEND ||
25418 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
25420 SDValue Narrow = N->getOperand(0);
25421 EVT NarrowVT = Narrow->getValueType(0);
25422 if (!NarrowVT.is128BitVector())
25425 if (Narrow->getOpcode() != ISD::XOR &&
25426 Narrow->getOpcode() != ISD::AND &&
25427 Narrow->getOpcode() != ISD::OR)
25430 SDValue N0 = Narrow->getOperand(0);
25431 SDValue N1 = Narrow->getOperand(1);
25434 // The left side has to be a trunc.
25435 if (N0.getOpcode() != ISD::TRUNCATE)
25438 // The type of the truncated inputs.
25439 EVT WideVT = N0->getOperand(0)->getValueType(0);
25443 // The right side has to be a 'trunc' or a constant vector.
25444 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
25445 ConstantSDNode *RHSConstSplat = nullptr;
25446 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
25447 RHSConstSplat = RHSBV->getConstantSplatNode();
25448 if (!RHSTrunc && !RHSConstSplat)
25451 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25453 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
25456 // Set N0 and N1 to hold the inputs to the new wide operation.
25457 N0 = N0->getOperand(0);
25458 if (RHSConstSplat) {
25459 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
25460 SDValue(RHSConstSplat, 0));
25461 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
25462 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
25463 } else if (RHSTrunc) {
25464 N1 = N1->getOperand(0);
25467 // Generate the wide operation.
25468 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
25469 unsigned Opcode = N->getOpcode();
25470 switch (Opcode) {
25471 case ISD::ANY_EXTEND:
25472 return Op;
25473 case ISD::ZERO_EXTEND: {
25474 unsigned InBits = NarrowVT.getScalarSizeInBits();
25475 APInt Mask = APInt::getAllOnesValue(InBits);
25476 Mask = Mask.zext(VT.getScalarSizeInBits());
25477 return DAG.getNode(ISD::AND, DL, VT,
25478 Op, DAG.getConstant(Mask, DL, VT));
25480 case ISD::SIGN_EXTEND:
25481 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
25482 Op, DAG.getValueType(NarrowVT));
25483 default:
25484 llvm_unreachable("Unexpected opcode");
25488 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
25489 TargetLowering::DAGCombinerInfo &DCI,
25490 const X86Subtarget *Subtarget) {
25491 SDValue N0 = N->getOperand(0);
25492 SDValue N1 = N->getOperand(1);
25493 SDLoc DL(N);
25495 // A vector zext_in_reg may be represented as a shuffle,
25496 // feeding into a bitcast (this represents anyext) feeding into
25497 // an and with a mask.
25498 // We'd like to try to combine that into a shuffle with zero
25499 // plus a bitcast, removing the and.
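// For example, zero-extending i16 lanes into i32 lanes may appear as
// (and (bitcast (shuffle x, <0,u,1,u,...>)), splat(0xFFFF)); it is rebuilt
// below as (bitcast (shuffle x, zero, <0,N,1,N,...>)), where index N selects
// a lane of the zero vector, so the AND is dropped.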
25500 if (N0.getOpcode() != ISD::BITCAST ||
25501 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
25504 // The other side of the AND should be a splat of 2^C, where C
25505 // is the number of bits in the source type.
25506 if (N1.getOpcode() == ISD::BITCAST)
25507 N1 = N1.getOperand(0);
25508 if (N1.getOpcode() != ISD::BUILD_VECTOR)
25510 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
25512 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
25513 EVT SrcType = Shuffle->getValueType(0);
25515 // We expect a single-source shuffle
25516 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
25519 unsigned SrcSize = SrcType.getScalarSizeInBits();
25521 APInt SplatValue, SplatUndef;
25522 unsigned SplatBitSize;
25523 bool HasAnyUndefs;
25524 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
25525 SplatBitSize, HasAnyUndefs))
25528 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
25529 // Make sure the splat matches the mask we expect
25530 if (SplatBitSize > ResSize ||
25531 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
25534 // Make sure the input and output size make sense
25535 if (SrcSize >= ResSize || ResSize % SrcSize)
25538 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
25539 // The number of u's between each two values depends on the ratio between
25540 // the source and dest type.
25541 unsigned ZextRatio = ResSize / SrcSize;
25542 bool IsZext = true;
25543 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
25544 if (i % ZextRatio) {
25545 if (Shuffle->getMaskElt(i) > 0) {
25546 // Expected undef
25547 IsZext = false;
25548 break;
25549 }
25550 } else {
25551 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
25552 // Expected element number
25553 IsZext = false;
25554 break;
25562 // Ok, perform the transformation - replace the shuffle with
25563 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
25564 // (instead of undef) where the k elements come from the zero vector.
25565 SmallVector<int, 8> Mask;
25566 unsigned NumElems = SrcType.getVectorNumElements();
25567 for (unsigned i = 0; i < NumElems; ++i)
25568 if (i % ZextRatio)
25569 Mask.push_back(NumElems);
25570 else
25571 Mask.push_back(i / ZextRatio);
25573 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
25574 Shuffle->getOperand(0), DAG.getConstant(0, DL, SrcType), Mask);
25575 return DAG.getBitcast(N0.getValueType(), NewShuffle);
25578 /// If both input operands of a logic op are being cast from floating point
25579 /// types, try to convert this into a floating point logic node to avoid
25580 /// unnecessary moves from SSE to integer registers.
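/// For example, (and (bitcast f32 a), (bitcast f32 b)) computed in i32
/// becomes (bitcast (X86ISD::FAND a, b)), replacing a GPR round trip with a
/// single ANDPS-style operation on the FP side.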
25581 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
25582 const X86Subtarget *Subtarget) {
25583 unsigned FPOpcode = ISD::DELETED_NODE;
25584 if (N->getOpcode() == ISD::AND)
25585 FPOpcode = X86ISD::FAND;
25586 else if (N->getOpcode() == ISD::OR)
25587 FPOpcode = X86ISD::FOR;
25588 else if (N->getOpcode() == ISD::XOR)
25589 FPOpcode = X86ISD::FXOR;
25591 assert(FPOpcode != ISD::DELETED_NODE &&
25592 "Unexpected input node for FP logic conversion");
25594 EVT VT = N->getValueType(0);
25595 SDValue N0 = N->getOperand(0);
25596 SDValue N1 = N->getOperand(1);
25598 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
25599 ((Subtarget->hasSSE1() && VT == MVT::i32) ||
25600 (Subtarget->hasSSE2() && VT == MVT::i64))) {
25601 SDValue N00 = N0.getOperand(0);
25602 SDValue N10 = N1.getOperand(0);
25603 EVT N00Type = N00.getValueType();
25604 EVT N10Type = N10.getValueType();
25605 if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) {
25606 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
25607 return DAG.getBitcast(VT, FPLogic);
25613 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
25614 TargetLowering::DAGCombinerInfo &DCI,
25615 const X86Subtarget *Subtarget) {
25616 if (DCI.isBeforeLegalizeOps())
25619 if (SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget))
25622 if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
25625 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
25628 EVT VT = N->getValueType(0);
25629 SDValue N0 = N->getOperand(0);
25630 SDValue N1 = N->getOperand(1);
25633 // Create BEXTR instructions
25634 // BEXTR is ((X >> imm) & (2**size-1))
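// For example, (and (srl x, 4), 0xFFF) extracts 12 bits starting at bit 4,
// which matches BEXTR with the control value Shift | (MaskSize << 8),
// i.e. 4 | (12 << 8) here.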
25635 if (VT == MVT::i32 || VT == MVT::i64) {
25636 // Check for BEXTR.
25637 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
25638 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
25639 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
25640 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25641 if (MaskNode && ShiftNode) {
25642 uint64_t Mask = MaskNode->getZExtValue();
25643 uint64_t Shift = ShiftNode->getZExtValue();
25644 if (isMask_64(Mask)) {
25645 uint64_t MaskSize = countPopulation(Mask);
25646 if (Shift + MaskSize <= VT.getSizeInBits())
25647 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
25648 DAG.getConstant(Shift | (MaskSize << 8), DL,
25649 VT));
25657 // Want to form ANDNP nodes:
25658 // 1) In the hopes of then easily combining them with OR and AND nodes
25659 // to form PBLEND/PSIGN.
25660 // 2) To match ANDN packed intrinsics
25661 if (VT != MVT::v2i64 && VT != MVT::v4i64)
25664 // Check LHS for vnot
25665 if (N0.getOpcode() == ISD::XOR &&
25666 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
25667 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
25668 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
25670 // Check RHS for vnot
25671 if (N1.getOpcode() == ISD::XOR &&
25672 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
25673 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
25674 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
25679 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
25680 TargetLowering::DAGCombinerInfo &DCI,
25681 const X86Subtarget *Subtarget) {
25682 if (DCI.isBeforeLegalizeOps())
25685 if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
25688 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
25691 SDValue N0 = N->getOperand(0);
25692 SDValue N1 = N->getOperand(1);
25693 EVT VT = N->getValueType(0);
25695 // look for psign/blend
25696 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
25697 if (!Subtarget->hasSSSE3() ||
25698 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
25701 // Canonicalize pandn to RHS
25702 if (N0.getOpcode() == X86ISD::ANDNP)
25703 std::swap(N0, N1);
25704 // or (and (m, y), (pandn m, x))
25705 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
25706 SDValue Mask = N1.getOperand(0);
25707 SDValue X = N1.getOperand(1);
25708 SDValue Y;
25709 if (N0.getOperand(0) == Mask)
25710 Y = N0.getOperand(1);
25711 if (N0.getOperand(1) == Mask)
25712 Y = N0.getOperand(0);
25714 // Check to see if the mask appeared in both the AND and ANDNP.
25715 if (!Y.getNode())
25716 return SDValue();
25718 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
25719 // Look through mask bitcast.
25720 if (Mask.getOpcode() == ISD::BITCAST)
25721 Mask = Mask.getOperand(0);
25722 if (X.getOpcode() == ISD::BITCAST)
25723 X = X.getOperand(0);
25724 if (Y.getOpcode() == ISD::BITCAST)
25725 Y = Y.getOperand(0);
25727 EVT MaskVT = Mask.getValueType();
25729 // Validate that the Mask operand is a vector sra node.
25730 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
25731 // there is no psrai.b
25732 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
25733 unsigned SraAmt = ~0;
25734 if (Mask.getOpcode() == ISD::SRA) {
25735 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
25736 if (auto *AmtConst = AmtBV->getConstantSplatNode())
25737 SraAmt = AmtConst->getZExtValue();
25738 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
25739 SDValue SraC = Mask.getOperand(1);
25740 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
25742 if ((SraAmt + 1) != EltBits)
25747 // Now we know we at least have a pblendvb with the mask val. See if
25748 // we can form a psignb/w/d.
25749 // psign = x.type == y.type == mask.type && y = sub(0, x);
25750 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
25751 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
25752 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
25753 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
25754 "Unsupported VT for PSIGN");
25755 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
25756 return DAG.getBitcast(VT, Mask);
25758 // PBLENDVB only available on SSE 4.1
25759 if (!Subtarget->hasSSE41())
25762 MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
25764 X = DAG.getBitcast(BlendVT, X);
25765 Y = DAG.getBitcast(BlendVT, Y);
25766 Mask = DAG.getBitcast(BlendVT, Mask);
25767 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
25768 return DAG.getBitcast(VT, Mask);
25772 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
25775 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
25776 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
25778 // SHLD/SHRD instructions have lower register pressure, but on some
25779 // platforms they have higher latency than the equivalent
25780 // series of shifts/or that would otherwise be generated.
25781 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
25782 // have higher latencies and we are not optimizing for size.
25783 if (!OptForSize && Subtarget->isSHLDSlow())
25786 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
25787 std::swap(N0, N1);
25788 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
25790 if (!N0.hasOneUse() || !N1.hasOneUse())
25793 SDValue ShAmt0 = N0.getOperand(1);
25794 if (ShAmt0.getValueType() != MVT::i8)
25796 SDValue ShAmt1 = N1.getOperand(1);
25797 if (ShAmt1.getValueType() != MVT::i8)
25799 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
25800 ShAmt0 = ShAmt0.getOperand(0);
25801 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
25802 ShAmt1 = ShAmt1.getOperand(0);
25804 SDLoc DL(N);
25805 unsigned Opc = X86ISD::SHLD;
25806 SDValue Op0 = N0.getOperand(0);
25807 SDValue Op1 = N1.getOperand(0);
25808 if (ShAmt0.getOpcode() == ISD::SUB) {
25809 Opc = X86ISD::SHRD;
25810 std::swap(Op0, Op1);
25811 std::swap(ShAmt0, ShAmt1);
25814 unsigned Bits = VT.getSizeInBits();
25815 if (ShAmt1.getOpcode() == ISD::SUB) {
25816 SDValue Sum = ShAmt1.getOperand(0);
25817 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25818 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25819 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25820 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25821 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25822 return DAG.getNode(Opc, DL, VT,
25823 Op0, Op1,
25824 DAG.getNode(ISD::TRUNCATE, DL,
25825 MVT::i8, ShAmt0));
25827 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25828 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25829 if (ShAmt0C &&
25830 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25831 return DAG.getNode(Opc, DL, VT,
25832 N0.getOperand(0), N1.getOperand(0),
25833 DAG.getNode(ISD::TRUNCATE, DL,
25834 MVT::i8, ShAmt0));
25840 // Generate NEG and CMOV for integer abs.
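// For example, i32 abs canonicalized as y = (sra x, 31); (xor (add x, y), y)
// is matched below and re-emitted as neg = (sub 0, x) plus a CMOV on
// COND_GE, picking -x when the subtraction's flags say x <= 0 and x
// otherwise.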
25841 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25842 EVT VT = N->getValueType(0);
25844 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25845 // 8-bit integer abs to NEG and CMOV.
25846 if (VT.isInteger() && VT.getSizeInBits() == 8)
25849 SDValue N0 = N->getOperand(0);
25850 SDValue N1 = N->getOperand(1);
25852 SDLoc DL(N);
25853 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25854 // and change it to SUB and CMOV.
25855 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25856 N0.getOpcode() == ISD::ADD &&
25857 N0.getOperand(1) == N1 &&
25858 N1.getOpcode() == ISD::SRA &&
25859 N1.getOperand(0) == N0.getOperand(0))
25860 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25861 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25862 // Generate SUB & CMOV.
25863 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25864 DAG.getConstant(0, DL, VT), N0.getOperand(0));
25866 SDValue Ops[] = { N0.getOperand(0), Neg,
25867 DAG.getConstant(X86::COND_GE, DL, MVT::i8),
25868 SDValue(Neg.getNode(), 1) };
25869 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25874 // Try to turn tests against the signbit in the form of:
25875 // XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
25876 // into:
25877 //   SETGT(X, -1)
25878 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
25879 // This is only worth doing if the output type is i8.
25880 if (N->getValueType(0) != MVT::i8)
25883 SDValue N0 = N->getOperand(0);
25884 SDValue N1 = N->getOperand(1);
25886 // We should be performing an xor against a truncated shift.
25887 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
25890 // Make sure we are performing an xor against one.
25891 if (!isOneConstant(N1))
25894 // SetCC on x86 zero extends so only act on this if it's a logical shift.
25895 SDValue Shift = N0.getOperand(0);
25896 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
25899 // Make sure we are truncating from one of i16, i32 or i64.
25900 EVT ShiftTy = Shift.getValueType();
25901 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
25904 // Make sure the shift amount extracts the sign bit.
25905 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
25906 Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1)
25909 // Create a greater-than comparison against -1.
25910 // N.B. Using SETGE against 0 works but we want a canonical looking
25911 // comparison; using SETGT matches up with what TranslateX86CC expects.
25913 SDValue ShiftOp = Shift.getOperand(0);
25914 EVT ShiftOpTy = ShiftOp.getValueType();
25915 SDValue Cond = DAG.getSetCC(DL, MVT::i8, ShiftOp,
25916 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
25920 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25921 TargetLowering::DAGCombinerInfo &DCI,
25922 const X86Subtarget *Subtarget) {
25923 if (DCI.isBeforeLegalizeOps())
25926 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
25929 if (Subtarget->hasCMov())
25930 if (SDValue RV = performIntegerAbsCombine(N, DAG))
25933 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
25939 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
25940 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
25941 /// X86ISD::AVG instruction.
25942 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
25943 const X86Subtarget *Subtarget, SDLoc DL) {
25944 if (!VT.isVector() || !VT.isSimple())
25946 EVT InVT = In.getValueType();
25947 unsigned NumElems = VT.getVectorNumElements();
25949 EVT ScalarVT = VT.getVectorElementType();
25950 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
25951 isPowerOf2_32(NumElems)))
25954 // InScalarVT is the intermediate type in AVG pattern and it should be greater
25955 // than the original input type (i8/i16).
25956 EVT InScalarVT = InVT.getVectorElementType();
25957 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
25960 if (Subtarget->hasAVX512()) {
25961 if (VT.getSizeInBits() > 512)
25963 } else if (Subtarget->hasAVX2()) {
25964 if (VT.getSizeInBits() > 256)
25967 if (VT.getSizeInBits() > 128)
25971 // Detect the following pattern:
25973 // %1 = zext <N x i8> %a to <N x i32>
25974 // %2 = zext <N x i8> %b to <N x i32>
25975 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
25976 // %4 = add nuw nsw <N x i32> %3, %2
25977 // %5 = lshr <N x i32> %4, <i32 1 x N>
25978 // %6 = trunc <N x i32> %5 to <N x i8>
25980 // In AVX512, the last instruction can also be a trunc store.
25982 if (In.getOpcode() != ISD::SRL)
25985 // A lambda checking the given SDValue is a constant vector and each element
25986 // is in the range [Min, Max].
25987 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
25988 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
25989 if (!BV || !BV->isConstant())
25991 for (unsigned i = 0, e = V.getNumOperands(); i < e; i++) {
25992 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(i));
25995 uint64_t Val = C->getZExtValue();
25996 if (Val < Min || Val > Max)
26002 // Check if each element of the vector is left-shifted by one.
26003 auto LHS = In.getOperand(0);
26004 auto RHS = In.getOperand(1);
26005 if (!IsConstVectorInRange(RHS, 1, 1))
26007 if (LHS.getOpcode() != ISD::ADD)
26010 // Detect a pattern of a + b + 1 where the order doesn't matter.
26011 SDValue Operands[3];
26012 Operands[0] = LHS.getOperand(0);
26013 Operands[1] = LHS.getOperand(1);
26015 // Take care of the case when one of the operands is a constant vector whose
26016 // element is in the range [1, 256].
26017 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
26018 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
26019 Operands[0].getOperand(0).getValueType() == VT) {
26020 // The pattern is detected. Subtract one from the constant vector, then
26021 // demote it and emit X86ISD::AVG instruction.
26022 SDValue One = DAG.getConstant(1, DL, InScalarVT);
26023 SDValue Ones = DAG.getNode(ISD::BUILD_VECTOR, DL, InVT,
26024 SmallVector<SDValue, 8>(NumElems, One));
26025 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], Ones);
26026 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
26027 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
26028 Operands[1]);
26031 if (Operands[0].getOpcode() == ISD::ADD)
26032 std::swap(Operands[0], Operands[1]);
26033 else if (Operands[1].getOpcode() != ISD::ADD)
26035 Operands[2] = Operands[1].getOperand(0);
26036 Operands[1] = Operands[1].getOperand(1);
26038 // Now we have three operands of two additions. Check that one of them is a
26039 // constant vector with ones, and the other two are promoted from i8/i16.
26040 for (int i = 0; i < 3; ++i) {
26041 if (!IsConstVectorInRange(Operands[i], 1, 1))
26042 continue;
26043 std::swap(Operands[i], Operands[2]);
26045 // Check if Operands[0] and Operands[1] are results of type promotion.
26046 for (int j = 0; j < 2; ++j)
26047 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
26048 Operands[j].getOperand(0).getValueType() != VT)
26051 // The pattern is detected, emit X86ISD::AVG instruction.
26052 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
26053 Operands[1].getOperand(0));
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations.
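  //
  // Sketch of the rewrite (illustrative, not from the original comments):
  // an unaligned 32-byte load
  //   v8f32 = load %ptr
  // becomes two 16-byte loads stitched back together,
  //   lo: v4f32 = load %ptr
  //   hi: v4f32 = load %ptr+16
  //   v8f32 = insert_subvector(insert_subvector(undef, lo, 0), hi, 4)
  // with the two load chains joined by a TokenFactor.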
  ISD::LoadExtType Ext = Ld->getExtensionType();
  bool Fast;
  unsigned AddressSpace = Ld->getAddressSpace();
  unsigned Alignment = Ld->getAlignment();
  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
      Ext == ISD::NON_EXTLOAD &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                             AddressSpace, Alignment, &Fast) && !Fast) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Ptr = Ld->getBasePtr();
    SDValue Increment =
        DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems/2);
    SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                std::min(16U, Alignment));
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1),
                             Load2.getValue(1));

    SDValue NewVec = DAG.getUNDEF(RegVT);
    NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
    NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  return SDValue();
}

/// PerformMLOADCombine - Resolve extending loads.
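/// A sketch of the rewrite (illustrative, not from the original comments):
/// a masked v8i16->v8i32 SEXTLOAD becomes a non-extending masked load of a
/// wide v16i16 value (real data in the low eight lanes, mask widened with
/// zeros), followed by an X86ISD::VSEXT of those lanes to v8i32.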
static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
  if (Mld->getExtensionType() != ISD::SEXTLOAD)
    return SDValue();

  EVT VT = Mld->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  EVT LdVT = Mld->getMemoryVT();
  SDLoc dl(Mld);

  assert(LdVT != VT && "Cannot extend to the same type");
  unsigned ToSz = VT.getVectorElementType().getSizeInBits();
  unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
  // The From/To sizes and element count must be powers of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for extending masked load");

  unsigned SizeRatio = ToSz / FromSz;
  assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   LdVT.getScalarType(), NumElems*SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  // Convert the Src0 value.
  SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
  if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
           "WideVecVT should be legal");
    WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
                                    DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
  }
  // Prepare the new mask.
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getBitcast(WideVecVT, Mask);
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
      ShuffleVec[i] = NumElems * SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, dl, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
                                     Mld->getBasePtr(), NewMask, WideSrc0,
                                     Mld->getMemoryVT(), Mld->getMemOperand(),
                                     ISD::NON_EXTLOAD);
  SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
  return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}

/// PerformMSTORECombine - Resolve truncating stores.
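/// A sketch of the rewrite (illustrative, not from the original comments):
/// a masked truncating store of v8i32 as v8i16 bitcasts the value to v16i16,
/// shuffles the eight lanes holding the low halves to the front, widens the
/// mask with zeros, and emits a normal (non-truncating) masked store.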
static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget *Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (!Mst->isTruncatingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT StVT = Mst->getMemoryVT();
  SDLoc dl(Mst);

  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromSz = VT.getVectorElementType().getSizeInBits();
  unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // The truncating store is legal in some cases. For example
  // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
  // are designated for truncate store.
  // In this case we don't need any further transformations.
  if (TLI.isTruncStoreLegal(VT, StVT))
    return SDValue();

  // The From/To sizes and element count must be powers of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for truncating masked store");
  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  assert(((NumElems * FromSz) % ToSz) == 0 &&
         "Unexpected ratio for truncating masked store");

  unsigned SizeRatio = FromSz / ToSz;
  assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   StVT.getScalarType(), NumElems*SizeRatio);

  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
  SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
  for (unsigned i = 0; i != NumElems; ++i)
    ShuffleVec[i] = i * SizeRatio;

  // Can't shuffle using an illegal type.
  assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
         "WideVecVT should be legal");

  SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                              DAG.getUNDEF(WideVecVT),
                                              &ShuffleVec[0]);

  SDValue NewMask;
  SDValue Mask = Mst->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getBitcast(WideVecVT, Mask);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, dl, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
                            Mst->getBasePtr(), NewMask, StVT,
                            Mst->getMemOperand(), false);
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getOperand(1);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we are saving a concatenation of two XMM registers and 32-byte stores
  // are slow, such as on Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  unsigned AddressSpace = St->getAddressSpace();
  unsigned Alignment = St->getAlignment();
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             AddressSpace, Alignment, &Fast) && !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
    SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);

    SDValue Stride =
        DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));
    SDValue Ptr0 = St->getBasePtr();
    SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);

    SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(), Alignment);
    SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(),
                               std::min(16U, Alignment));
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
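  //
  // A sketch of the idea (illustrative, not from the original comments):
  // storing v8i32 as v8i16 becomes
  //   wide   = bitcast v8i32 %x to v16i16
  //   packed = shuffle wide, undef, <0,2,4,6,8,10,12,14,u,...>
  // so all payload sits in the low 128 bits, which are then written out in
  // the widest legal scalar chunks.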
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of X86ISD::AVG
    // instruction.
    SDValue Avg =
        detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG, Subtarget, dl);
    if (Avg.getNode())
      return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                          St->getPointerInfo(), St->isVolatile(),
                          St->isNonTemporal(), St->getAlignment());

    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromSz = VT.getVectorElementType().getSizeInBits();
    unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

    // The truncating store is legal in some cases. For example
    // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
    // are designated for truncate store.
    // In this case we don't need any further transformations.
    if (TLI.isTruncStoreLegal(VT, StVT))
      return SDValue();

    // The From/To sizes and element count must be powers of two.
    if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store size.
    if (0 != (NumElems * FromSz) % ToSz) return SDValue();

    unsigned SizeRatio = FromSz / ToSz;

    assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                     StVT.getScalarType(), NumElems*SizeRatio);

    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT))
      return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                         DAG.getUNDEF(WideVecVT),
                                         &ShuffleVec[0]);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit.
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
        StoreType = Tp;
    }

    // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
    if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
        (64 <= NumElems * ToSz))
      StoreType = MVT::f64;

    // Bitcast the original vector into a vector of store-size units.
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
            StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
    SDValue Ptr = St->getBasePtr();

    // Perform one or more big stores into memory.
    for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(i, dl));
      SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
                                St->getPointerInfo(), St->isVolatile(),
                                St->isNonTemporal(), St->getAlignment());
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      Chains.push_back(Ch);
    }

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.
  //
  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
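  //
  // Illustrative effect (not from the original comments): on a 32-bit target
  // with SSE2, an i64 load feeding an i64 store can become a single f64
  // (movsd-style) load/store pair instead of two 32-bit operations; the bits
  // stored are unchanged.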
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget->useSoftFloat() && !NoImplicitFloatOps && Subtarget->hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = nullptr;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load. We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor. It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getPointerInfo(), Ld->isVolatile(),
                                  Ld->isNonTemporal(), Ld->isInvariant(),
                                  Ld->getAlignment());
      SDValue NewChain = NewLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
                          St->getPointerInfo(),
                          St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, LdDL, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(), Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, StDL, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }

  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->isVolatile(),
                        St->isNonTemporal(), St->getAlignment());
  }

  return SDValue();
}

/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
/// Note that the binary operation should have the property that if one of the
/// operands is UNDEF then the result is UNDEF.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
  // Look for the following pattern: if
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  // At least one of the operands should be a vector shuffle.
  if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  MVT VT = LHS.getSimpleValueType();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");

  // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
  // operate independently on 128-bit lanes.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts / NumLanes;
  assert((NumLaneElts % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  unsigned HalfLaneElts = NumLaneElts/2;

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle then pretend it is the shuffle
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: in what follows a default initialized SDValue represents an UNDEF of
  // type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
  if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
      A = LHS.getOperand(0);
    if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
      B = LHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), LMask.begin());
  } else {
    if (LHS.getOpcode() != ISD::UNDEF)
      A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
  if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
      C = RHS.getOperand(0);
    if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
      D = RHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), RMask.begin());
  } else {
    if (RHS.getOpcode() != ISD::UNDEF)
      C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }

  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D) && !(A == D && B == C))
    return false;

  // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
  if (!A.getNode() && !B.getNode())
    return false;

  // If A and B occur in reverse order in RHS, then "swap" them (which means
  // rewriting the mask).
  if (A != C)
    ShuffleVectorSDNode::commuteMask(RMask);

  // At this point LHS and RHS are equivalent to
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  //   RHS = VECTOR_SHUFFLE A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      int LIdx = LMask[i+l], RIdx = RMask[i+l];

      // Ignore any UNDEF components.
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // Check that successive elements are being operated on. If not, this is
      // not a horizontal operation.
      unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
      int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}

/// Do target-specific dag combines on floating point adds.
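/// For example (illustrative, not from the original comments), with SSE3 and
/// v4f32 operands:
///   (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
/// is recognized by isHorizontalBinOp and emitted as (X86ISD::FHADD A, B),
/// i.e. a single haddps instruction.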
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, true))
    return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}

/// Do target-specific dag combines on floating point subs.
static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal subs from subs of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, false))
    return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}

/// Truncate a group of v4i32 into v16i8/v8i16 using X86ISD::PACKUS.
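/// Sketch of the cascade (illustrative, not from the original comments): the
/// inputs are first masked so no lane exceeds the unsigned range of the
/// result element, so PACKUS's saturation never clips; each round then
/// halves the element size, e.g. 2 x v4i32 -> v8i16 -> v16i8, with a final
/// pack-and-extract when the result type (such as v8i8) is not legal.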
static SDValue
combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
                                  SmallVector<SDValue, 8> &Regs) {
  assert(Regs.size() > 0 && (Regs[0].getValueType() == MVT::v4i32 ||
                             Regs[0].getValueType() == MVT::v2i64));
  EVT OutVT = N->getValueType(0);
  EVT OutSVT = OutVT.getVectorElementType();
  EVT InVT = Regs[0].getValueType();
  EVT InSVT = InVT.getVectorElementType();
  SDLoc DL(N);

  // First, use a mask to unset all bits that won't appear in the result.
  assert((OutSVT == MVT::i8 || OutSVT == MVT::i16) &&
         "OutSVT can only be either i8 or i16.");
  SDValue MaskVal =
      DAG.getConstant(OutSVT == MVT::i8 ? 0xFF : 0xFFFF, DL, InSVT);
  SDValue MaskVec = DAG.getNode(
      ISD::BUILD_VECTOR, DL, InVT,
      SmallVector<SDValue, 8>(InVT.getVectorNumElements(), MaskVal));
  for (auto &Reg : Regs)
    Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVec, Reg);

  MVT UnpackedVT, PackedVT;
  if (OutSVT == MVT::i8) {
    UnpackedVT = MVT::v8i16;
    PackedVT = MVT::v16i8;
  } else {
    UnpackedVT = MVT::v4i32;
    PackedVT = MVT::v8i16;
  }

  // In each iteration, truncate the type by a half size.
  auto RegNum = Regs.size();
  for (unsigned j = 1, e = InSVT.getSizeInBits() / OutSVT.getSizeInBits();
       j < e; j *= 2, RegNum /= 2) {
    for (unsigned i = 0; i < RegNum; i++)
      Regs[i] = DAG.getNode(ISD::BITCAST, DL, UnpackedVT, Regs[i]);
    for (unsigned i = 0; i < RegNum / 2; i++)
      Regs[i] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[i * 2],
                            Regs[i * 2 + 1]);
  }

  // If the type of the result is v8i8, we need do one more X86ISD::PACKUS, and
  // then extract a subvector as the result since v8i8 is not a legal type.
  if (OutVT == MVT::v8i8) {
    Regs[0] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[0], Regs[0]);
    Regs[0] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, Regs[0],
                          DAG.getIntPtrConstant(0, DL));
    return Regs[0];
  } else if (RegNum > 1) {
    Regs.resize(RegNum);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
  } else
    return Regs[0];
}

/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
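/// Sketch (illustrative, not from the original comments): shifting each i32
/// lane left then arithmetic-right by 16 replicates bit 15 through the upper
/// half, so every lane is the exact sign-extension of its low 16 bits and
/// PACKSS's signed saturation returns those 16 bits unchanged:
/// 2 x v4i32 -> v8i16 in one pack.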
static SDValue
combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG,
                                  SmallVector<SDValue, 8> &Regs) {
  assert(Regs.size() > 0 && Regs[0].getValueType() == MVT::v4i32);
  EVT OutVT = N->getValueType(0);
  SDLoc DL(N);

  // Shift left by 16 bits, then arithmetic-shift right by 16 bits.
  SDValue ShAmt = DAG.getConstant(16, DL, MVT::i32);
  for (auto &Reg : Regs) {
    Reg = getTargetVShiftNode(X86ISD::VSHLI, DL, MVT::v4i32, Reg, ShAmt, DAG);
    Reg = getTargetVShiftNode(X86ISD::VSRAI, DL, MVT::v4i32, Reg, ShAmt, DAG);
  }

  for (unsigned i = 0, e = Regs.size() / 2; i < e; i++)
    Regs[i] = DAG.getNode(X86ISD::PACKSS, DL, MVT::v8i16, Regs[i * 2],
                          Regs[i * 2 + 1]);

  if (Regs.size() > 2) {
    Regs.resize(Regs.size() / 2);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
  } else
    return Regs[0];
}

/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  EVT InVT = In.getValueType();
  unsigned NumElems = OutVT.getVectorNumElements();

  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
  // SSE2, and we need to take care of it specially.
  // AVX512 provides vpmovdb.
  if (!Subtarget->hasSSE2() || Subtarget->hasAVX2())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget->hasSSSE3() && NumElems == 8 &&
      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
    return SDValue();

  SDLoc DL(N);

  // Split a long vector into vectors of legal type.
  unsigned RegNum = InVT.getSizeInBits() / 128;
  SmallVector<SDValue, 8> SubVec(RegNum);
  if (InSVT == MVT::i32) {
    for (unsigned i = 0; i < RegNum; i++)
      SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                              DAG.getIntPtrConstant(i * 4, DL));
  } else {
    for (unsigned i = 0; i < RegNum; i++)
      SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                              DAG.getIntPtrConstant(i * 2, DL));
  }

  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
  // truncate 2 x v4i32 to v8i16.
  if (Subtarget->hasSSE41() || OutSVT == MVT::i8)
    return combineVectorTruncationWithPACKUS(N, DAG, SubVec);
  else if (InSVT == MVT::i32)
    return combineVectorTruncationWithPACKSS(N, DAG, SubVec);
  else
    return SDValue();
}

static SDValue PerformTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  // Try to detect the AVG pattern first.
  SDValue Avg = detectAVGPattern(N->getOperand(0), N->getValueType(0), DAG,
                                 Subtarget, SDLoc(N));
  if (Avg.getNode())
    return Avg;

  return combineVectorTruncation(N, DAG, Subtarget);
}

/// Do target-specific dag combines on floating point negations.
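/// For example, -(FMADD a, b, c) = -(a*b + c) = -(a*b) - c, which is exactly
/// FNMSUB a, b, c; the other three FMA opcodes permute the same way, so the
/// negation can be folded into the FMA opcode instead of emitting an FNEG.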
static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  SDValue Arg = N->getOperand(0);
  SDLoc DL(N);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // If we're negating a FMUL node on a target with FMA, then we can avoid the
  // use of a constant by performing (-0 - A*B) instead.
  // FIXME: Check rounding control flags as well once it becomes available.
  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
      Arg->getFlags()->hasNoSignedZeros() && Subtarget->hasAnyFMA()) {
    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
    return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
                       Arg.getOperand(1), Zero);
  }

  // If we're negating an FMA node, then we can adjust the
  // instruction to include the extra negation.
  if (Arg.hasOneUse()) {
    switch (Arg.getOpcode()) {
    case X86ISD::FMADD:
      return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
                         Arg.getOperand(1), Arg.getOperand(2));
    case X86ISD::FMSUB:
      return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0),
                         Arg.getOperand(1), Arg.getOperand(2));
    case X86ISD::FNMADD:
      return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0),
                         Arg.getOperand(1), Arg.getOperand(2));
    case X86ISD::FNMSUB:
      return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0),
                         Arg.getOperand(1), Arg.getOperand(2));
    }
  }

  return SDValue();
}

static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (VT.is512BitVector() && !Subtarget->hasDQI()) {
    // VXORPS, VORPS, VANDPS, VANDNPS are supported only under the DQ
    // extension. These logic operations may be executed in the integer
    // domain instead.
    SDLoc dl(N);
    MVT IntScalar = MVT::getIntegerVT(VT.getScalarSizeInBits());
    MVT IntVT = MVT::getVectorVT(IntScalar, VT.getVectorNumElements());

    SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(0));
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(1));
    unsigned IntOpcode = 0;
    switch (N->getOpcode()) {
    default: llvm_unreachable("Unexpected FP logic op");
    case X86ISD::FOR: IntOpcode = ISD::OR; break;
    case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
    case X86ISD::FAND: IntOpcode = ISD::AND; break;
    case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
    }
    SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
    return DAG.getNode(ISD::BITCAST, dl, VT, IntOp);
  }
  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);

  // F[X]OR(0.0, x) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // Only perform optimizations if UnsafeMath is used.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMINC and FMAXC, which are commutative operations.
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
  default: llvm_unreachable("unknown opcode");
  case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}

static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
                                            const X86Subtarget *Subtarget) {
  if (Subtarget->useSoftFloat())
    return SDValue();

  // TODO: Check for global or instruction-level "nnan". In that case, we
  //       should be able to lower to FMAX/FMIN alone.
  // TODO: If an operand is already known to be a NaN or not a NaN, this
  //       should be an optional swap and FMAX/FMIN.

  EVT VT = N->getValueType(0);
  if (!((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
        (Subtarget->hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
        (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
    return SDValue();

  // This takes at least 3 instructions, so favor a library call when operating
  // on a scalar and minimizing code size.
  if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);
  EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), VT);

  // There are 4 possibilities involving NaN inputs, and these are the required
  // outputs:
  //                    Op1
  //                Num     NaN
  //             ----------------
  //         Num |  Max  |  Op0 |
  //  Op0        ----------------
  //         NaN |  Op1  |  NaN |
  //             ----------------
  //
  // The SSE FP max/min instructions were not designed for this case, but rather
  // to implement:
  //   Min = Op1 < Op0 ? Op1 : Op0
  //   Max = Op1 > Op0 ? Op1 : Op0
  //
  // So they always return Op0 if either input is a NaN. However, we can still
  // use those instructions for fmaxnum by selecting away a NaN input.

  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);

  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
  // are NaN, the NaN value of Op1 is the result.
  auto SelectOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT;
  return DAG.getNode(SelectOpcode, DL, VT, IsOp0Nan, Op1, MinOrMax);
}

/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  // FAND(0.0, x) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);

  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  // FANDN(0.0, x) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  // FANDN(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}

static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
  }
  return SDValue();
}

static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
                                               const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE
  // and AVX2 since there is no sign-extended shift right operation on a
  // vector with 64-bit elements.
  //   (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2,
    // it may be replaced with an X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}

/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// Promoting a sign extension ahead of an 'add nsw' exposes opportunities
/// to combine math ops, use an LEA, or use a complex addressing mode. This can
/// eliminate extend, add, and shift instructions.
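/// Illustrative example (not from the original comments), for i32 %x:
///   (i64 sext (add nsw %x, 12))  -->  (add nsw (i64 sext %x), 12)
/// The nsw flag guarantees both forms compute the same value, and the wider
/// add can then fold into users, e.g. an LEA such as leaq 12(%rax,%rcx,4).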
static SDValue promoteSextBeforeAddNSW(SDNode *Sext, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  // TODO: This should be valid for other integer types.
  EVT VT = Sext->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  // We need an 'add nsw' feeding into the 'sext'.
  SDValue Add = Sext->getOperand(0);
  if (Add.getOpcode() != ISD::ADD || !Add->getFlags()->hasNoSignedWrap())
    return SDValue();

  // Having a constant operand to the 'add' ensures that we are not increasing
  // the instruction count because the constant is extended for free below.
  // A constant operand can also become the displacement field of an LEA.
  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
  if (!AddOp1)
    return SDValue();

  // Don't make the 'add' bigger if there's no hope of combining it with some
  // other 'add' or 'shl' instruction.
  // TODO: It may be profitable to generate simpler LEA instructions in place
  // of single 'add' instructions, but the cost model for selecting an LEA
  // currently has a high threshold.
  bool HasLEAPotential = false;
  for (auto *User : Sext->uses()) {
    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
      HasLEAPotential = true;
      break;
    }
  }
  if (!HasLEAPotential)
    return SDValue();

  // Everything looks good, so pull the 'sext' ahead of the 'add'.
  int64_t AddConstant = AddOp1->getSExtValue();
  SDValue AddOp0 = Add.getOperand(0);
  SDValue NewSext = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Sext), VT, AddOp0);
  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);

  // The wider add is guaranteed to not wrap because both operands are
  // sign-extended.
  SDNodeFlags Flags;
  Flags.setNoSignedWrap(true);
  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewSext, NewConstant, &Flags);
}

static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT InVT = N0.getValueType();
  EVT InSVT = InVT.getScalarType();
  SDLoc DL(N);

  // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
  // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
  // This exposes the sext to the sdivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
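  //
  // Illustrative machine-level effect (exact selection may vary): after an
  // 8-bit divide the remainder lives in AH, so the combined node can be
  // selected to something like "movsbl %ah, %ecx" instead of copying AH out
  // and sign-extending it in a separate step.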
  if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
      InVT == MVT::i8 && VT == MVT::i32) {
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, DL, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  if (!DCI.isBeforeLegalizeOps()) {
    if (InVT == MVT::i1) {
      SDValue Zero = DAG.getConstant(0, DL, VT);
      SDValue AllOnes =
          DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
      return DAG.getNode(ISD::SELECT, DL, VT, N0, AllOnes, Zero);
    }
    return SDValue();
  }

  if (VT.isVector() && Subtarget->hasSSE2()) {
    auto ExtendVecSize = [&DAG](SDLoc DL, SDValue N, unsigned Size) {
      EVT InVT = N.getValueType();
      EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
                                   Size / InVT.getScalarSizeInBits());
      SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
                                    DAG.getUNDEF(InVT));
      Opnds[0] = N;
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
    };

    // If target-size is less than 128-bits, extend to a type that would extend
    // to 128 bits, extend that and extract the original target vector.
    if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits()) &&
        (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
        (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
      unsigned Scale = 128 / VT.getSizeInBits();
      EVT ExVT =
          EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
      SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, ExVT, Ex);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
                         DAG.getIntPtrConstant(0, DL));
    }

    // If target-size is 128-bits, then convert to ISD::SIGN_EXTEND_VECTOR_INREG
    // which ensures lowering to X86ISD::VSEXT (pmovsx*).
    if (VT.getSizeInBits() == 128 &&
        (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
        (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
      SDValue ExOp = ExtendVecSize(DL, N0, 128);
      return DAG.getSignExtendVectorInReg(ExOp, DL, VT);
    }

    // On pre-AVX2 targets, split into 128-bit nodes of
    // ISD::SIGN_EXTEND_VECTOR_INREG.
    if (!Subtarget->hasInt256() && !(VT.getSizeInBits() % 128) &&
        (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
        (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
      unsigned NumVecs = VT.getSizeInBits() / 128;
      unsigned NumSubElts = 128 / SVT.getSizeInBits();
      EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
      EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);

      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, Offset = 0; i != NumVecs;
           ++i, Offset += NumSubElts) {
        SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
                                     DAG.getIntPtrConstant(Offset, DL));
        SrcVec = ExtendVecSize(DL, SrcVec, 128);
        SrcVec = DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT);
        Opnds.push_back(SrcVec);
      }
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
    }
  }

  if (Subtarget->hasAVX() && VT.is256BitVector())
    if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
      return R;

  if (SDValue NewAdd = promoteSextBeforeAddNSW(N, DAG, Subtarget))
    return NewAdd;

  return SDValue();
}

static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasAnyFMA())
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  bool NegA = (A.getOpcode() == ISD::FNEG);
  bool NegB = (B.getOpcode() == ISD::FNEG);
  bool NegC = (C.getOpcode() == ISD::FNEG);

  // The multiplication is negated when exactly one of A or B is negated
  // (NegA xor NegB).
  bool NegMul = (NegA != NegB);
  if (NegA)
    A = A.getOperand(0);
  if (NegB)
    B = B.getOperand(0);
  if (NegC)
    C = C.getOperand(0);

  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;

  return DAG.getNode(Opcode, dl, VT, A, B, C);
}

static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      if (!isOneConstant(N0.getOperand(1)))
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (VT.is256BitVector())
    if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
      return R;

  // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
  // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
  // This exposes the zext to the udivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
  if (N0.getOpcode() == ISD::UDIVREM &&
      N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
      (VT == MVT::i32 || VT == MVT::i64)) {
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  return SDValue();
}

// Optimize  x == -y --> x+y == 0
//           x != -y --> x+y != 0
static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
    if (isNullConstant(LHS.getOperand(0)) && LHS.hasOneUse()) {
      SDValue addV = DAG.getNode(ISD::ADD, DL, LHS.getValueType(), RHS,
                                 LHS.getOperand(1));
      return DAG.getSetCC(DL, N->getValueType(0), addV,
                          DAG.getConstant(0, DL, addV.getValueType()), CC);
    }
  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
    if (isNullConstant(RHS.getOperand(0)) && RHS.hasOneUse()) {
      SDValue addV = DAG.getNode(ISD::ADD, DL, RHS.getValueType(), LHS,
                                 RHS.getOperand(1));
      return DAG.getSetCC(DL, N->getValueType(0), addV,
                          DAG.getConstant(0, DL, addV.getValueType()), CC);
    }

  if (VT.getScalarType() == MVT::i1 &&
      (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
    bool IsSEXT0 =
        (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
        (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
    bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (!IsSEXT0 || !IsVZero1) {
      // Swap the operands and update the condition code.
      std::swap(LHS, RHS);
      CC = ISD::getSetCCSwappedOperands(CC);

      IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
                (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
      IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
    }

    if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETGT)
        return DAG.getConstant(0, DL, VT);
      if (CC == ISD::SETLE)
        return DAG.getConstant(1, DL, VT);
      if (CC == ISD::SETEQ || CC == ISD::SETGE)
        return DAG.getNOT(DL, LHS.getOperand(0), VT);

      assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
             "Unexpected condition code!");
      return LHS.getOperand(0);
    }
  }

  return SDValue();
}

static SDValue PerformBLENDICombine(SDNode *N, SelectionDAG &DAG) {
  SDValue V0 = N->getOperand(0);
  SDValue V1 = N->getOperand(1);
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Canonicalize a v2f64 blend with a mask of 2 by swapping the vector
  // operands and changing the mask to 1. This saves us a bunch of
  // pattern-matching possibilities related to scalar math ops in SSE/AVX.
  // x86InstrInfo knows how to commute this back after instruction selection
  // if it would help register allocation.
  //
  // TODO: If optimizing for size or a processor that doesn't suffer from
  // partial register update stalls, this should be transformed into a MOVSD
  // instruction because a MOVSD is 1-2 bytes smaller than a BLENDPD.
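  //
  // Concretely (illustrative): (BLENDI V0, V1, 2) takes lane 0 from V0 and
  // lane 1 from V1; (BLENDI V1, V0, 1) selects the same lanes from the
  // swapped operands, so the two forms are equivalent.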
  if (VT == MVT::v2f64)
    if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
        SDValue NewMask = DAG.getConstant(1, DL, MVT::i8);
        return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
      }

  return SDValue();
}

static SDValue PerformGatherScatterCombine(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  // Gather and Scatter instructions use k-registers for masks. The type of
  // the masks is v*i1. So the mask will be truncated anyway.
  // The SIGN_EXTEND_INREG may therefore be dropped.
  SDValue Mask = N->getOperand(2);
  if (Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
    NewOps[2] = Mask.getOperand(0);
    DAG.UpdateNodeOperands(N, NewOps);
  }
  return SDValue();
}

// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
// as "sbb reg,reg", since it can be extended without zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
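//
// Illustrative: with the carry flag set, "sbb %eax, %eax" computes
// eax - eax - CF = -1 (all ones), and 0 when the carry is clear; AND-ing
// with 1 recovers the plain 0/1 result of "setb" when that is required.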
static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
                               MVT VT) {
  if (VT == MVT::i8)
    return DAG.getNode(ISD::AND, DL, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(X86::COND_B, DL, MVT::i8),
                                   EFLAGS),
                       DAG.getConstant(1, DL, VT));
  assert(VT == MVT::i1 && "Unexpected type for SETCC node");
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
                     DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                 DAG.getConstant(X86::COND_B, DL, MVT::i8),
                                 EFLAGS));
}

// Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  if (CC == X86::COND_A) {
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
    // cannot take an immediate as its first operand.
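    //
    // Illustrative: COND_A over (a - b) tests a > b (unsigned). The same
    // predicate over the swapped subtraction (b - a) is the carry test
    // COND_B, which MaterializeSETB can then turn into an sbb.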
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
                                   EFLAGS.getNode()->getVTList(),
                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
    }
  }

  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in
  // some cases.
  if (CC == X86::COND_B)
    return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));

  if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
  }

  return SDValue();
}

// Optimize branch condition evaluation.
//
static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Chain = N->getOperand(0);
  SDValue Dest = N->getOperand(1);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                       Flags);
  }

  return SDValue();
}

static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
                                                         SelectionDAG &DAG) {
  // Take advantage of vector comparisons producing 0 or -1 in each lane to
  // optimize away the operation when it's from a constant.
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //       AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)
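  //
  // Worked example (a hypothetical v4i32 -> v4f32 sint_to_fp):
  //    sint_to_fp (and (setcc x, y, setlt), (build_vector 1, 1, 1, 1))
  //      --> and (setcc x, y, setlt), (bitcast (build_vector 1.0f x4))
  // Each setcc lane is all-zeros or all-ones, so the AND selects either the
  // zero bit pattern or the converted constant, exactly matching a lane-wise
  // UNARYOP.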
  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
    return SDValue();
  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  if (BuildVectorSDNode *BV =
          dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
                                 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    return Res;
  }

  return SDValue();
}
static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86Subtarget *Subtarget) {
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();
  EVT InSVT = InVT.getScalarType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
  // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
  // A zero-extended i8/i16 value fits in i32 with the sign bit clear, so the
  // cheaper signed conversion computes the same result.
  if (InVT.isVector() && (InSVT == MVT::i8 || InSVT == MVT::i16)) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);

    if (TLI.isOperationLegal(ISD::UINT_TO_FP, DstVT))
      return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);

    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  return SDValue();
}
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86Subtarget *Subtarget) {
  // First try to optimize away the conversion entirely when it's
  // conditionally from a constant. Vectors only.
  if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
    return Res;

  // Now move on to more general possibilities.
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();
  EVT InSVT = InVT.getScalarType();

  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
  // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
  if (InVT.isVector() && (InSVT == MVT::i8 || InSVT == MVT::i16)) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }
  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
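  // e.g. on i686, (sint_to_fp (load i64)) can become a single x87 "fild"
  // reading directly from the load's memory operand, rather than a
  // conversion synthesized from 32-bit GPR halves (an illustrative outcome;
  // the exact code depends on the surrounding DAG).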
  if (!Subtarget->useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT LdVT = Ld->getValueType(0);

    // This transformation is not supported if the result type is f16.
    if (VT == MVT::f16)
      return SDValue();

    if (!Ld->isVolatile() && !VT.isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !Subtarget->is64Bit() && LdVT == MVT::i64) {
      SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
          SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
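  // i.e. (adc 0, 0, CF) == CF, so the value result is just the incoming
  // carry bit materialized as (and (setcc_carry COND_B, EFLAGS), 1), and the
  // carry-out is known to be zero.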
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // the flag result is dead.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B, DL,
                                                           MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, DL, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}
// fold (add Y, (sete  X, 0)) -> adc  0, Y
//      (add Y, (setne X, 0)) -> sbb -1, Y
//      (sub (sete  X, 0), Y) -> sbb  0, Y
//      (sub (setne X, 0), Y) -> adc -1, Y
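//
// These work because "cmp $1, X" (i.e. computing X - 1) sets the carry flag
// exactly when X == 0, so the boolean can be consumed directly as a carry by
// adc/sbb instead of being materialized with setcc and zero-extended.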
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();

  SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = SetCC.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue CmpOp0 = Cmp.getOperand(0);
  SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
                               DAG.getConstant(1, DL, CmpOp0.getValueType()));

  SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
  if (CC == X86::COND_NE)
    return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                       DL, OtherVal.getValueType(), OtherVal,
                       DAG.getConstant(-1ULL, DL, OtherVal.getValueType()),
                       NewCmp);
  return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
}
/// PerformAddCombine - Do target-specific dag combines on integer adds.
static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
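  // e.g. for v4i32 (a sketch of the shape isHorizontalBinOp recognizes):
  //   add (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>)
  //     --> X86ISD::HADD A, B        // phaddd (phaddw for the i16 types)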
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}
static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
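    //
    // e.g. 5 - (y ^ 3)  -->  (y ^ ~3) + 6, since -(y ^ C) == (y ^ ~C) + 1.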
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
    }
  }
  // Try to synthesize horizontal subs from subs of shuffles.
  EVT VT = N->getValueType(0);
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, false))
    return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}
/// performVZEXTCombine - Performs VZEXT DAG combines.
static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
  SDValue Op = N->getOperand(0);
  MVT OpVT = Op.getSimpleValueType();
  MVT OpEltVT = OpVT.getVectorElementType();
  unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();

  // (vzext (bitcast (vzext x))) -> (vzext x)
  SDValue V = Op;
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
    MVT InnerVT = V.getSimpleValueType();
    MVT InnerEltVT = InnerVT.getVectorElementType();

    // If the element sizes match exactly, we can just do one larger vzext.
    // This is always an exact type match as vzext operates on integer types.
    if (OpEltVT == InnerEltVT) {
      assert(OpVT == InnerVT && "Types must match for vzext!");
      return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
    }

    // The only other way we can combine them is if only a single element of
    // the inner vzext is used in the input to the outer vzext.
    if (InnerEltVT.getSizeInBits() < InputBits)
      return SDValue();

    // In this case, the inner vzext is completely dead because we're going to
    // only look at bits inside of the low element. Just do the outer vzext on
    // a bitcast of the input to the inner.
    return DAG.getNode(X86ISD::VZEXT, DL, VT, DAG.getBitcast(OpVT, V));
  }
  // Check if we can bypass extracting and re-inserting an element of an input
  // vector. Essentially:
  // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
    SDValue ExtractedV = V.getOperand(0);
    SDValue OrigV = ExtractedV.getOperand(0);
    if (isNullConstant(ExtractedV.getOperand(1))) {
      MVT OrigVT = OrigV.getSimpleValueType();
      // Extract a subvector if necessary...
      if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
        int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
        OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
                                  OrigVT.getVectorNumElements() / Ratio);
        OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
                            DAG.getIntPtrConstant(0, DL));
      }
      Op = DAG.getBitcast(OpVT, OrigV);
      return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
    }
  }

  return SDValue();
}
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::SHRUNKBLEND:
    return PerformSELECTCombine(N, DAG, DCI, Subtarget);
  case ISD::BITCAST: return PerformBITCASTCombine(N, DAG, Subtarget);
  case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
  case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
  case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
  case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
  case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
  case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
  case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG, Subtarget);
  case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
  case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
  case ISD::FNEG: return PerformFNEGCombine(N, DAG, Subtarget);
  case ISD::TRUNCATE: return PerformTRUNCATECombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR: return PerformFORCombine(N, DAG, Subtarget);
  case X86ISD::FMIN:
  case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: return performFMinNumFMaxNumCombine(N, DAG,
                                                         Subtarget);
  case X86ISD::FAND: return PerformFANDCombine(N, DAG, Subtarget);
  case X86ISD::FANDN: return PerformFANDNCombine(N, DAG, Subtarget);
  case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND_INREG:
    return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
  case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
  case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
  case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
  case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP: // Handle all target specific shuffles
  case X86ISD::PALIGNR:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERM2X128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
  case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
  case X86ISD::BLENDI: return PerformBLENDICombine(N, DAG);
  case ISD::MGATHER:
  case ISD::MSCATTER: return PerformGatherScatterCombine(N, DAG);
  }

  return SDValue();
}
/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}
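// For the i16 case above: 16-bit ops need a 0x66 operand-size prefix, e.g.
// "addw $1, %ax" vs. "addl $1, %eax", and a 16-bit immediate acts as a
// length-changing prefix that stalls decode on several Intel cores (an
// illustrative pair; this hook only reports desirability, it rewrites
// nothing itself).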
/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then it
    // might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is a liveout.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}
//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.

  for (StringRef Piece : Pieces) {
    if (!S.startswith(Piece)) // Check if the piece matches.
      return false;

    S = S.substr(Piece.size());
    StringRef::size_type Pos = S.find_first_not_of(" \t");
    if (Pos == 0) // We matched a prefix.
      return false;

    S = S.substr(Pos);
  }

  return S.empty();
}
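// e.g. matchAsm("  bswap   $0", {"bswap", "$0"}) == true, whereas
//      matchAsm("bswapl $0", {"bswap", "$0"}) == false, because "bswapl"
//      only prefix-matches "bswap" and the Pos == 0 check rejects that.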
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }
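    // e.g. asm("bswap $0" : "=r"(x) : "0"(x)) on an i32 collapses to a
    // single @llvm.bswap.i32 call, which the backend can then fold or
    // re-materialize freely (an illustrative source-level use, not code
    // from this file).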
    // rorw $$8, ${0:w} --> llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't': case 'u':
    case 'y': case 'x': case 'Y': case 'l':
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'L': case 'M': case 'N':
    case 'G': case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    // Fall through.
  case 'R': case 'q': case 'Q': case 'a': case 'b': case 'c':
  case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x': case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in certain
      // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q': // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r': // GENERAL_REGS
    case 'l': // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R': // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f': // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y': // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y': // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32: case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64: case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
      // Vector types.
      case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
      case MVT::v4f32: case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
      case MVT::v8f32: case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      // AVX-512 types.
      case MVT::v8f64: case MVT::v16f32: case MVT::v16i32: case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  // MVT::Other is used to specify clobber names.
  if (Res.second->hasType(VT) || VT == MVT::Other)
    return Res; // Correct type already, nothing to do.
  // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
  // return "eax". This should even work for things like getting 64bit integer
  // registers when given an f64 type.
  const TargetRegisterClass *Class = Res.second;
  if (Class == &X86::GR8RegClass || Class == &X86::GR16RegClass ||
      Class == &X86::GR32RegClass || Class == &X86::GR64RegClass) {
    unsigned Size = VT.getSizeInBits();
    if (Size == 1) Size = 8;
    unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
    if (DestReg > 0) {
      Res.first = DestReg;
      Res.second = Size == 8 ? &X86::GR8RegClass
                 : Size == 16 ? &X86::GR16RegClass
                 : Size == 32 ? &X86::GR32RegClass
                 : &X86::GR64RegClass;
      assert(Res.second->contains(Res.first) && "Register in register class");
    } else {
      // No register found/type mismatch.
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (Class == &X86::FR32RegClass || Class == &X86::FR64RegClass ||
             Class == &X86::VR128RegClass || Class == &X86::VR256RegClass ||
             Class == &X86::FR32XRegClass || Class == &X86::FR64XRegClass ||
             Class == &X86::VR128XRegClass || Class == &X86::VR256XRegClass ||
             Class == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: Return an error;
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}
int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
  // Integer division on x86 is expensive. However, when aggressively
  // optimizing for code size, we prefer to use a div instruction, as it is
  // usually smaller than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
  bool OptSize = Attr.hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::MinSize);
  return OptSize && !VT.isVector();
}