//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, SDValue Idx,
                                  SelectionDAG &DAG, DebugLoc dl);
static SDValue Extract128BitVector(SDValue Vec, SDValue Idx,
                                   SelectionDAG &DAG, DebugLoc dl);
/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
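/// For example (illustrative note, not from the original comment): extracting
/// element 5 of a v8i32 operand falls in the upper 128-bit chunk; the index
/// below is normalized to 4, the first element of that chunk, so the extract
/// maps onto a v4i32 EXTRACT_SUBVECTOR.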
static SDValue Extract128BitVector(SDValue Vec, SDValue Idx,
                                   SelectionDAG &DAG, DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  int Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::UNDEF, dl, ResultVT);

  if (isa<ConstantSDNode>(Idx)) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
    // we can match to VEXTRACTF128.
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                                 VecIdx);
    return Result;
  }

  return SDValue();
}
/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
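/// For example (illustrative note, not from the original comment): an
/// unaligned insertion index such as 6 into a v8i32 result is normalized
/// below to 4, the start of the upper 128-bit chunk of the wide register.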
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, SDValue Idx,
                                  SelectionDAG &DAG, DebugLoc dl) {
  if (isa<ConstantSDNode>(Idx)) {
    EVT VT = Vec.getValueType();
    assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");
    EVT ElVT = VT.getVectorElementType();
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    EVT ResultVT = Result.getValueType();

    // Insert the relevant 128 bits.
    unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                         VecIdx);
    return Result;
  }

  return SDValue();
}
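// createTLOF - Pick the TargetLoweringObjectFile implementation that matches
// the object format of the subtarget (Mach-O, ELF, or COFF).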
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasXMMInt();
  X86ScalarSSEf32 = Subtarget->hasXMM();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
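  // (Added note: the IntVTs array below lists the scalar integer types that
  // the legalization loops later in this constructor iterate over when
  // expanding div/rem, multiply-high, and the overflow arithmetic nodes.
  // This is an inference from those loops, not part of the original comment.)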
  static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register-pressure-specific scheduler.
  if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);
  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up the Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
  }
  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp (no underscore)!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
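  // (Explanatory note, not part of the original comment: the x86 FP compare
  // instructions report "unordered" through PF, so an ordered-equal test must
  // check ZF set *and* PF clear, and unordered-not-equal must check ZF clear
  // *or* PF set -- hence the two conditions above.)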
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  } else if (!UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }
  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
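  // (Added note: expanding SDIV/SREM and UDIV/UREM below is what lets the DAG
  // legalizer merge a division and remainder of the same operands into one
  // two-result ISD::SDIVREM/UDIVREM node, matching x86's IDIV/DIV.)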
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
  if (Subtarget->hasXMM())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
  setShouldFoldAtomicFences(true);

  // Expand certain atomics.
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
  }
  // FIXME - use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC,
                     (Subtarget->is64Bit() ? MVT::i64 : MVT::i32),
                     (Subtarget->isTargetCOFF()
                      && !Subtarget->isTargetEnvMacho()
                      ? Custom : Expand));
  if (!UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
  } else if (!UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);

    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);

  APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
  addLegalFPImmediate(TmpFlt);  // FLD0
  TmpFlt.changeSign();
  addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

  bool ignored;
  APFloat TmpFlt2(+1.0);
  TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                  &ignored);
  addLegalFPImmediate(TmpFlt2);  // FLD1
  TmpFlt2.changeSign();
  addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS

  setOperationAction(ISD::FSIN, MVT::f80, Expand);
  setOperationAction(ISD::FCOS, MVT::f80, Expand);

  setOperationAction(ISD::FMA, MVT::f80, Expand);
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
  if (!UseSoftFloat && Subtarget->hasXMM()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
  }
  if (!UseSoftFloat && Subtarget->hasXMMInt()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      EVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                         VT.getSimpleVT().SimpleTy, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, SVT, Promote);
      AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
      setOperationAction(ISD::OR, SVT, Promote);
      AddPromotedToType (ISD::OR, SVT, MVT::v2i64);
      setOperationAction(ISD::XOR, SVT, Promote);
      AddPromotedToType (ISD::XOR, SVT, MVT::v2i64);
      setOperationAction(ISD::LOAD, SVT, Promote);
      AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
  }
  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // Can turn SHL into an integer multiply.
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }
  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v8i16, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
  }

  if (Subtarget->hasSSE42())
    setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);
  if (!UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
    addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    // Custom lower several nodes for 256-bit types.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, SVT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, SVT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, SVT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, SVT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, SVT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, SVT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, SVT, Promote);
      AddPromotedToType (ISD::AND, SVT, MVT::v4i64);
      setOperationAction(ISD::OR, SVT, Promote);
      AddPromotedToType (ISD::OR, SVT, MVT::v4i64);
      setOperationAction(ISD::XOR, SVT, Promote);
      AddPromotedToType (ISD::XOR, SVT, MVT::v4i64);
      setOperationAction(ISD::LOAD, SVT, Promote);
      AddPromotedToType (ISD::LOAD, SVT, MVT::v4i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType (ISD::SELECT, SVT, MVT::v4i64);
    }
  }
  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
                       Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions.
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance, so do
  // not reduce the limit.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(16);
  benefitFromCodePlacementOpt = true;

  setPrefFunctionAlignment(4);
}
MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i8;
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
    }
  }
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasXMM())
    getMaxByValAlign(Ty, Align);
  return Align;
}
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'NonScalarIntSafe' is true, it is safe to return a
/// non-scalar-integer type, e.g. for an empty string source, a constant, or
/// a value loaded from memory. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool NonScalarIntSafe,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux. This is because the stack realignment code can't handle certain
  // cases like PR2962. This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (NonScalarIntSafe &&
      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasXMMInt()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}
/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::Create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget->is64Bit())
    // This doesn't have DebugLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
  return Table;
}
/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}
// FIXME: Why is this routine here? Move to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = (Subtarget->is64Bit()
           ? X86::GR64RegisterClass : X86::GR32RegisterClass);
    break;
  case MVT::x86mmx:
    RRC = X86::VR64RegisterClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = X86::VR128RegisterClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}
bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  // Add the regs to the liveout set for the function.
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  for (unsigned i = 0; i != RVLocs.size(); ++i)
    if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg()))
      MRI.addLiveOut(RVLocs[i].getLocReg());

  SDValue Flag;

  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                         MVT::i16));
  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget->is64Bit() && !Subtarget->hasXMM())) {
      report_fatal_error("SSE register return with SSE disabled");
    }
    // Likewise we can't return F64 values with SSE1 only. gcc does so, but
    // llvm-gcc has never done it right and no one has noticed, so this
    // should be OK for now.
    if (ValVT == MVT::f64 &&
        (Subtarget->is64Bit() && !Subtarget->hasXMMInt()))
      report_fatal_error("SSE2 register return with SSE2 disabled");

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::ST0 ||
        VA.getLocReg() == X86::ST1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget->is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget->hasSSE2())
            ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
        }
      }
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }
  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax.
  if (Subtarget->is64Bit() &&
      DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments().");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());

    Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX now acts like a return value.
    MRI.addLiveOut(X86::RAX);
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, dl,
                     MVT::Other, &RetOps[0], RetOps.size());
}
1413 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const {
1414 if (N->getNumValues() != 1)
1416 if (!N->hasNUsesOfValue(1, 0))
1419 SDNode *Copy = *N->use_begin();
1420 if (Copy->getOpcode() != ISD::CopyToReg &&
1421 Copy->getOpcode() != ISD::FP_EXTEND)
1424 bool HasRet = false;
1425 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
1427 if (UI->getOpcode() != X86ISD::RET_FLAG)
1436 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
1437 ISD::NodeType ExtendKind) const {
1439 // TODO: Is this also valid on 32-bit?
1440 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
1441 ReturnMVT = MVT::i8;
1443 ReturnMVT = MVT::i32;
1445 EVT MinVT = getRegisterType(Context, ReturnMVT);
1446 return VT.bitsLT(MinVT) ? MinVT : VT;
1449 /// LowerCallResult - Lower the result values of a call into the
1450 /// appropriate copies out of appropriate physical registers.
1453 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1454 CallingConv::ID CallConv, bool isVarArg,
1455 const SmallVectorImpl<ISD::InputArg> &Ins,
1456 DebugLoc dl, SelectionDAG &DAG,
1457 SmallVectorImpl<SDValue> &InVals) const {
1459 // Assign locations to each value returned by this call.
1460 SmallVector<CCValAssign, 16> RVLocs;
1461 bool Is64Bit = Subtarget->is64Bit();
1462 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1463 getTargetMachine(), RVLocs, *DAG.getContext());
1464 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
1466 // Copy all of the result registers out of their specified physreg.
1467 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1468 CCValAssign &VA = RVLocs[i];
1469 EVT CopyVT = VA.getValVT();
1471 // If this is x86-64, and we disabled SSE, we can't return FP values
1472 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
1473 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasXMM())) {
1474 report_fatal_error("SSE register return with SSE disabled");
1479 // If this is a call to a function that returns an fp value on the floating
1480 // point stack, we must guarantee that the value is popped from the stack, so
1481 // a CopyFromReg is not good enough - the copy instruction may be eliminated
1482 // if the return value is not used. We use the FpPOP_RETVAL instruction
1484 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
1485 // If we prefer to use the value in xmm registers, copy it out as f80 and
1486 // use a truncate to move it from fp stack reg to xmm reg.
1487 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1488 SDValue Ops[] = { Chain, InFlag };
1489 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1490 MVT::Other, MVT::Glue, Ops, 2), 1);
1491 Val = Chain.getValue(0);
1493 // Round the f80 to the right size, which also moves it to the appropriate
1495 if (CopyVT != VA.getValVT())
1496 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1497 // This truncation won't change the value.
1498 DAG.getIntPtrConstant(1));
1500 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1501 CopyVT, InFlag).getValue(1);
1502 Val = Chain.getValue(0);
1504 InFlag = Chain.getValue(2);
1505 InVals.push_back(Val);
1512 //===----------------------------------------------------------------------===//
1513 // C & StdCall & Fast Calling Convention implementation
1514 //===----------------------------------------------------------------------===//
1515 // The StdCall calling convention is the standard for many Windows API
1516 // routines. It differs from the C calling convention only slightly: the
1517 // callee cleans up the stack rather than the caller, and symbols are
1518 // decorated (name-mangled). It doesn't support any vector arguments.
1519 // For info on fast calling convention see Fast Calling Convention (tail call)
1520 // implementation LowerX86_32FastCCCallTo.
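// Illustrative example (not from this file): at the IR level a StdCall routine
// is just a function marked with the x86_stdcallcc convention, e.g.
//   declare x86_stdcallcc i32 @MessageBeep(i32)
//   ...
//   call x86_stdcallcc i32 @MessageBeep(i32 0)
// On 32-bit Windows the emitted symbol is decorated as _MessageBeep@4 and the
// callee pops the 4 bytes of arguments on return.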
1522 /// CallIsStructReturn - Determines whether a call uses struct return
1524 static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1528 return Outs[0].Flags.isSRet();
1531 /// ArgsAreStructReturn - Determines whether a function uses struct
1532 /// return semantics.
1534 ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
1538 return Ins[0].Flags.isSRet();
1541 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
1542 /// by "Src" to address "Dst" with size and alignment information specified by
1543 /// the specific parameter attribute. The copy will be passed as a byval
1544 /// function parameter.
1546 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
1547 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1549 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
1551 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
1552 /*isVolatile*/false, /*AlwaysInline=*/true,
1553 MachinePointerInfo(), MachinePointerInfo());
1556 /// IsTailCallConvention - Return true if the calling convention is one that
1557 /// supports tail call optimization.
1558 static bool IsTailCallConvention(CallingConv::ID CC) {
1559 return (CC == CallingConv::Fast || CC == CallingConv::GHC);
1562 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
1563 if (!CI->isTailCall())
1567 CallingConv::ID CalleeCC = CS.getCallingConv();
1568 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1574 /// FuncIsMadeTailCallSafe - Return true if the function is being made into
1575 /// a tailcall target by changing its ABI.
1576 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
1577 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
1581 X86TargetLowering::LowerMemArgument(SDValue Chain,
1582 CallingConv::ID CallConv,
1583 const SmallVectorImpl<ISD::InputArg> &Ins,
1584 DebugLoc dl, SelectionDAG &DAG,
1585 const CCValAssign &VA,
1586 MachineFrameInfo *MFI,
1588 // Create the nodes corresponding to a load from this parameter slot.
1589 ISD::ArgFlagsTy Flags = Ins[i].Flags;
1590 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv);
1591 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
1594 // If value is passed by pointer we have address passed instead of the value
1596 if (VA.getLocInfo() == CCValAssign::Indirect)
1597 ValVT = VA.getLocVT();
1599 ValVT = VA.getValVT();
1601 // FIXME: For now, all byval parameter objects are marked mutable. This can be
1602 // changed with more analysis.
1603 // In case of tail call optimization mark all arguments mutable, since they
1604 // could be overwritten by the lowering of arguments in case of a tail call.
1605 if (Flags.isByVal()) {
1606 unsigned Bytes = Flags.getByValSize();
1607 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
1608 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
1609 return DAG.getFrameIndex(FI, getPointerTy());
1611 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
1612 VA.getLocMemOffset(), isImmutable);
1613 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1614 return DAG.getLoad(ValVT, dl, Chain, FIN,
1615 MachinePointerInfo::getFixedStack(FI),
1621 X86TargetLowering::LowerFormalArguments(SDValue Chain,
1622 CallingConv::ID CallConv,
1624 const SmallVectorImpl<ISD::InputArg> &Ins,
1627 SmallVectorImpl<SDValue> &InVals)
1629 MachineFunction &MF = DAG.getMachineFunction();
1630 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1632 const Function* Fn = MF.getFunction();
1633 if (Fn->hasExternalLinkage() &&
1634 Subtarget->isTargetCygMing() &&
1635 Fn->getName() == "main")
1636 FuncInfo->setForceFramePointer(true);
1638 MachineFrameInfo *MFI = MF.getFrameInfo();
1639 bool Is64Bit = Subtarget->is64Bit();
1640 bool IsWin64 = Subtarget->isTargetWin64();
1642 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
1643 "Var args not supported with calling convention fastcc or ghc");
1645 // Assign locations to all of the incoming arguments.
1646 SmallVector<CCValAssign, 16> ArgLocs;
1647 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
1648 ArgLocs, *DAG.getContext());
1650 // Allocate shadow area for Win64
1652 CCInfo.AllocateStack(32, 8);
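// The 32 bytes are the Win64 "home" area: one 8-byte slot for each of the
// register parameters RCX, RDX, R8 and R9, reserved by the caller just above
// the return address so the callee may spill its register arguments there.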
1655 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
1657 unsigned LastVal = ~0U;
1659 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1660 CCValAssign &VA = ArgLocs[i];
1661 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
1663 assert(VA.getValNo() != LastVal &&
1664 "Don't support value assigned to multiple locs yet");
1665 LastVal = VA.getValNo();
1667 if (VA.isRegLoc()) {
1668 EVT RegVT = VA.getLocVT();
1669 TargetRegisterClass *RC = NULL;
1670 if (RegVT == MVT::i32)
1671 RC = X86::GR32RegisterClass;
1672 else if (Is64Bit && RegVT == MVT::i64)
1673 RC = X86::GR64RegisterClass;
1674 else if (RegVT == MVT::f32)
1675 RC = X86::FR32RegisterClass;
1676 else if (RegVT == MVT::f64)
1677 RC = X86::FR64RegisterClass;
1678 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256)
1679 RC = X86::VR256RegisterClass;
1680 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
1681 RC = X86::VR128RegisterClass;
1682 else if (RegVT == MVT::x86mmx)
1683 RC = X86::VR64RegisterClass;
1685 llvm_unreachable("Unknown argument type!");
1687 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1688 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1690 // If this is an 8 or 16-bit value, it is really passed promoted to 32
1691 // bits. Insert an assert[sz]ext to capture this, then truncate to the
1693 if (VA.getLocInfo() == CCValAssign::SExt)
1694 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1695 DAG.getValueType(VA.getValVT()));
1696 else if (VA.getLocInfo() == CCValAssign::ZExt)
1697 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1698 DAG.getValueType(VA.getValVT()));
1699 else if (VA.getLocInfo() == CCValAssign::BCvt)
1700 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1702 if (VA.isExtInLoc()) {
1703 // Handle MMX values passed in XMM regs.
1704 if (RegVT.isVector()) {
1705 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(),
1708 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1711 assert(VA.isMemLoc());
1712 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
1715 // If value is passed via pointer - do a load.
1716 if (VA.getLocInfo() == CCValAssign::Indirect)
1717 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
1718 MachinePointerInfo(), false, false, 0);
1720 InVals.push_back(ArgValue);
1723 // The x86-64 ABI for returning structs by value requires that we copy
1724 // the sret argument into %rax for the return. Save the argument into
1725 // a virtual register so that we can access it from the return points.
1726 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) {
1727 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1728 unsigned Reg = FuncInfo->getSRetReturnReg();
1730 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
1731 FuncInfo->setSRetReturnReg(Reg);
1733 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
1734 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
1737 unsigned StackSize = CCInfo.getNextStackOffset();
1738 // Align stack specially for tail calls.
1739 if (FuncIsMadeTailCallSafe(CallConv))
1740 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1742 // If the function takes variable number of arguments, make a frame index for
1743 // the start of the first vararg value... for expansion of llvm.va_start.
1745 if (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
1746 CallConv != CallingConv::X86_ThisCall)) {
1747 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true));
1750 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
1752 // FIXME: We should really autogenerate these arrays
1753 static const unsigned GPR64ArgRegsWin64[] = {
1754 X86::RCX, X86::RDX, X86::R8, X86::R9
1756 static const unsigned GPR64ArgRegs64Bit[] = {
1757 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
1759 static const unsigned XMMArgRegs64Bit[] = {
1760 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1761 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1763 const unsigned *GPR64ArgRegs;
1764 unsigned NumXMMRegs = 0;
1767 // The XMM registers which might contain var arg parameters are shadowed
1768 // in their paired GPR. So we only need to save the GPR to their home
1770 TotalNumIntRegs = 4;
1771 GPR64ArgRegs = GPR64ArgRegsWin64;
1773 TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
1774 GPR64ArgRegs = GPR64ArgRegs64Bit;
1776 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs);
1778 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
1781 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
1782 assert(!(NumXMMRegs && !Subtarget->hasXMM()) &&
1783 "SSE register cannot be used when SSE is disabled!");
1784 assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) &&
1785 "SSE register cannot be used when SSE is disabled!");
1786 if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM())
1787 // Kernel mode asks for SSE to be disabled, so don't push them
1789 TotalNumXMMRegs = 0;
1792 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
1793 // Get to the caller-allocated home save location. Add 8 to account
1794 // for the return address.
1795 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
1796 FuncInfo->setRegSaveFrameIndex(
1797 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
1798 // Fixup to set vararg frame on shadow area (4 x i64).
1800 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
1802 // For X86-64, if there are vararg parameters that are passed via
1803 // registers, then we must store them to their spots on the stack so they
1804 // may be loaded by dereferencing the result of va_next.
1805 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
1806 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
1807 FuncInfo->setRegSaveFrameIndex(
1808 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
1812 // Store the integer parameter registers.
1813 SmallVector<SDValue, 8> MemOps;
1814 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
1816 unsigned Offset = FuncInfo->getVarArgsGPOffset();
1817 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
1818 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
1819 DAG.getIntPtrConstant(Offset));
1820 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
1821 X86::GR64RegisterClass);
1822 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
1824 DAG.getStore(Val.getValue(1), dl, Val, FIN,
1825 MachinePointerInfo::getFixedStack(
1826 FuncInfo->getRegSaveFrameIndex(), Offset),
1828 MemOps.push_back(Store);
1832 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) {
1833 // Now store the XMM (fp + vector) parameter registers.
1834 SmallVector<SDValue, 11> SaveXMMOps;
1835 SaveXMMOps.push_back(Chain);
1837 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass);
1838 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
1839 SaveXMMOps.push_back(ALVal);
1841 SaveXMMOps.push_back(DAG.getIntPtrConstant(
1842 FuncInfo->getRegSaveFrameIndex()));
1843 SaveXMMOps.push_back(DAG.getIntPtrConstant(
1844 FuncInfo->getVarArgsFPOffset()));
1846 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
1847 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs],
1848 X86::VR128RegisterClass);
1849 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
1850 SaveXMMOps.push_back(Val);
1852 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
1854 &SaveXMMOps[0], SaveXMMOps.size()));
1857 if (!MemOps.empty())
1858 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1859 &MemOps[0], MemOps.size());
1863 // Some CCs need callee pop.
1864 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) {
1865 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
1867 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
1868 // If this is an sret function, the return should pop the hidden pointer.
1869 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins))
1870 FuncInfo->setBytesToPopOnReturn(4);
1874 // RegSaveFrameIndex is X86-64 only.
1875 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
1876 if (CallConv == CallingConv::X86_FastCall ||
1877 CallConv == CallingConv::X86_ThisCall)
1878 // fastcc functions can't have varargs.
1879 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
1886 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
1887 SDValue StackPtr, SDValue Arg,
1888 DebugLoc dl, SelectionDAG &DAG,
1889 const CCValAssign &VA,
1890 ISD::ArgFlagsTy Flags) const {
1891 unsigned LocMemOffset = VA.getLocMemOffset();
1892 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
1893 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1894 if (Flags.isByVal())
1895 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
1897 return DAG.getStore(Chain, dl, Arg, PtrOff,
1898 MachinePointerInfo::getStack(LocMemOffset),
1902 /// EmitTailCallLoadRetAddr - Emit a load of return address if tail call
1903 /// optimization is performed and it is required.
1905 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
1906 SDValue &OutRetAddr, SDValue Chain,
1907 bool IsTailCall, bool Is64Bit,
1908 int FPDiff, DebugLoc dl) const {
1909 // Adjust the Return address stack slot.
1910 EVT VT = getPointerTy();
1911 OutRetAddr = getReturnAddressFrameIndex(DAG);
1913 // Load the "old" Return address.
1914 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
1916 return SDValue(OutRetAddr.getNode(), 1);
1919 /// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
1920 /// optimization is performed and it is required (FPDiff!=0).
1922 EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
1923 SDValue Chain, SDValue RetAddrFrIdx,
1924 bool Is64Bit, int FPDiff, DebugLoc dl) {
1925 // Store the return address to the appropriate stack slot.
1926 if (!FPDiff) return Chain;
1927 // Calculate the new stack slot for the return address.
1928 int SlotSize = Is64Bit ? 8 : 4;
1929 int NewReturnAddrFI =
1930 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
1931 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
1932 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1933 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
1934 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
1940 X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1941 CallingConv::ID CallConv, bool isVarArg,
1943 const SmallVectorImpl<ISD::OutputArg> &Outs,
1944 const SmallVectorImpl<SDValue> &OutVals,
1945 const SmallVectorImpl<ISD::InputArg> &Ins,
1946 DebugLoc dl, SelectionDAG &DAG,
1947 SmallVectorImpl<SDValue> &InVals) const {
1948 MachineFunction &MF = DAG.getMachineFunction();
1949 bool Is64Bit = Subtarget->is64Bit();
1950 bool IsWin64 = Subtarget->isTargetWin64();
1951 bool IsStructRet = CallIsStructReturn(Outs);
1952 bool IsSibcall = false;
1955 // Check if it's really possible to do a tail call.
1956 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1957 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1958 Outs, OutVals, Ins, DAG);
1960 // Sibcalls are automatically detected tailcalls which do not require
1962 if (!GuaranteedTailCallOpt && isTailCall)
1969 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
1970 "Var args not supported with calling convention fastcc or ghc");
1972 // Analyze operands of the call, assigning locations to each operand.
1973 SmallVector<CCValAssign, 16> ArgLocs;
1974 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
1975 ArgLocs, *DAG.getContext());
1977 // Allocate shadow area for Win64
1979 CCInfo.AllocateStack(32, 8);
1982 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
1984 // Get a count of how many bytes are to be pushed on the stack.
1985 unsigned NumBytes = CCInfo.getNextStackOffset();
1987 // This is a sibcall. The memory operands are available in the caller's
1988 // own caller's stack.
1990 else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv))
1991 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
1994 if (isTailCall && !IsSibcall) {
1995 // Lower arguments at fp - stackoffset + fpdiff.
1996 unsigned NumBytesCallerPushed =
1997 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1998 FPDiff = NumBytesCallerPushed - NumBytes;
2000 // Set the delta of movement of the returnaddr stackslot.
2001 // But only set if delta is greater than previous delta.
2002 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
2003 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
2007 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2009 SDValue RetAddrFrIdx;
2010 // Load return address for tail calls.
2011 if (isTailCall && FPDiff)
2012 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2013 Is64Bit, FPDiff, dl);
2015 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2016 SmallVector<SDValue, 8> MemOpChains;
2019 // Walk the register/memloc assignments, inserting copies/loads. In the case
2020 // of tail call optimization arguments are handle later.
2021 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2022 CCValAssign &VA = ArgLocs[i];
2023 EVT RegVT = VA.getLocVT();
2024 SDValue Arg = OutVals[i];
2025 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2026 bool isByVal = Flags.isByVal();
2028 // Promote the value if needed.
2029 switch (VA.getLocInfo()) {
2030 default: llvm_unreachable("Unknown loc info!");
2031 case CCValAssign::Full: break;
2032 case CCValAssign::SExt:
2033 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2035 case CCValAssign::ZExt:
2036 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2038 case CCValAssign::AExt:
2039 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
2040 // Special case: passing MMX values in XMM registers.
2041 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2042 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2043 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2045 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2047 case CCValAssign::BCvt:
2048 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2050 case CCValAssign::Indirect: {
2051 // Store the argument.
2052 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2053 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2054 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2055 MachinePointerInfo::getFixedStack(FI),
2062 if (VA.isRegLoc()) {
2063 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2064 if (isVarArg && IsWin64) {
2065 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2066 // shadow reg if callee is a varargs function.
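// (For example, a double passed in XMM1 is also copied into RDX so a varargs
// callee can spill the argument from either register.)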
2067 unsigned ShadowReg = 0;
2068 switch (VA.getLocReg()) {
2069 case X86::XMM0: ShadowReg = X86::RCX; break;
2070 case X86::XMM1: ShadowReg = X86::RDX; break;
2071 case X86::XMM2: ShadowReg = X86::R8; break;
2072 case X86::XMM3: ShadowReg = X86::R9; break;
2075 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2077 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2078 assert(VA.isMemLoc());
2079 if (StackPtr.getNode() == 0)
2080 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
2081 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2082 dl, DAG, VA, Flags));
2086 if (!MemOpChains.empty())
2087 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2088 &MemOpChains[0], MemOpChains.size());
2090 // Build a sequence of copy-to-reg nodes chained together with token chain
2091 // and flag operands which copy the outgoing args into registers.
2093 // Tail call byval lowering might overwrite argument registers so in case of
2094 // tail call optimization the copies to registers are lowered later.
2096 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2097 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2098 RegsToPass[i].second, InFlag);
2099 InFlag = Chain.getValue(1);
2102 if (Subtarget->isPICStyleGOT()) {
2103 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2106 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
2107 DAG.getNode(X86ISD::GlobalBaseReg,
2108 DebugLoc(), getPointerTy()),
2110 InFlag = Chain.getValue(1);
2112 // If we are tail calling and generating PIC/GOT style code load the
2113 // address of the callee into ECX. The value in ecx is used as target of
2114 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2115 // for tail calls on PIC/GOT architectures. Normally we would just put the
2116 // address of GOT into ebx and then call target@PLT. But for tail calls
2117 // ebx would be restored (since ebx is callee saved) before jumping to the
2120 // Note: The actual moving to ECX is done further down.
2121 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2122 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2123 !G->getGlobal()->hasProtectedVisibility())
2124 Callee = LowerGlobalAddress(Callee, DAG);
2125 else if (isa<ExternalSymbolSDNode>(Callee))
2126 Callee = LowerExternalSymbol(Callee, DAG);
2130 if (Is64Bit && isVarArg && !IsWin64) {
2131 // From AMD64 ABI document:
2132 // For calls that may call functions that use varargs or stdargs
2133 // (prototype-less calls or calls to functions containing ellipsis (...) in
2134 // the declaration) %al is used as hidden argument to specify the number
2135 // of SSE registers used. The contents of %al do not need to match exactly
2136 // the number of registers, but must be an upper bound on the number of SSE
2137 // registers used and is in the range 0 - 8 inclusive.
2139 // Count the number of XMM registers allocated.
2140 static const unsigned XMMArgRegs[] = {
2141 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2142 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2144 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2145 assert((Subtarget->hasXMM() || !NumXMMRegs)
2146 && "SSE registers cannot be used when SSE is disabled");
2148 Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
2149 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
2150 InFlag = Chain.getValue(1);
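// Illustrative example: for a call like printf("%f\n", 3.14) the double is
// passed in XMM0, so the code above materializes the constant 1 into AL right
// before the call instruction.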
2154 // For tail calls lower the arguments to the 'real' stack slot.
2156 // Force all the incoming stack arguments to be loaded from the stack
2157 // before any new outgoing arguments are stored to the stack, because the
2158 // outgoing stack slots may alias the incoming argument stack slots, and
2159 // the alias isn't otherwise explicit. This is slightly more conservative
2160 // than necessary, because it means that each store effectively depends
2161 // on every argument instead of just those arguments it would clobber.
2162 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
2164 SmallVector<SDValue, 8> MemOpChains2;
2167 // Do not flag preceding copytoreg stuff together with the following stuff.
2169 if (GuaranteedTailCallOpt) {
2170 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2171 CCValAssign &VA = ArgLocs[i];
2174 assert(VA.isMemLoc());
2175 SDValue Arg = OutVals[i];
2176 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2177 // Create frame index.
2178 int32_t Offset = VA.getLocMemOffset()+FPDiff;
2179 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2180 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
2181 FIN = DAG.getFrameIndex(FI, getPointerTy());
2183 if (Flags.isByVal()) {
2184 // Copy relative to framepointer.
2185 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
2186 if (StackPtr.getNode() == 0)
2187 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
2189 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
2191 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
2195 // Store relative to framepointer.
2196 MemOpChains2.push_back(
2197 DAG.getStore(ArgChain, dl, Arg, FIN,
2198 MachinePointerInfo::getFixedStack(FI),
2204 if (!MemOpChains2.empty())
2205 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2206 &MemOpChains2[0], MemOpChains2.size());
2208 // Copy arguments to their registers.
2209 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2210 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2211 RegsToPass[i].second, InFlag);
2212 InFlag = Chain.getValue(1);
2216 // Store the return address to the appropriate stack slot.
2217 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
2221 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
2222 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
2223 // In the 64-bit large code model, we have to make all calls
2224 // through a register, since the call instruction's 32-bit
2225 // pc-relative offset may not be large enough to hold the whole
2227 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2228 // If the callee is a GlobalAddress node (quite common, every direct call
2229 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
2232 // We should use extra load for direct calls to dllimported functions in
2234 const GlobalValue *GV = G->getGlobal();
2235 if (!GV->hasDLLImportLinkage()) {
2236 unsigned char OpFlags = 0;
2237 bool ExtraLoad = false;
2238 unsigned WrapperKind = ISD::DELETED_NODE;
2240 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2241 // external symbols must go through the PLT in PIC mode. If the symbol
2242 // has hidden or protected visibility, or if it is static or local, then
2243 // we don't need to use the PLT - we can directly call it.
2244 if (Subtarget->isTargetELF() &&
2245 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2246 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2247 OpFlags = X86II::MO_PLT;
2248 } else if (Subtarget->isPICStyleStubAny() &&
2249 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2250 (!Subtarget->getTargetTriple().isMacOSX() ||
2251 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2252 // PC-relative references to external symbols should go through $stub,
2253 // unless we're building with the leopard linker or later, which
2254 // automatically synthesizes these stubs.
2255 OpFlags = X86II::MO_DARWIN_STUB;
2256 } else if (Subtarget->isPICStyleRIPRel() &&
2257 isa<Function>(GV) &&
2258 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
2259 // If the function is marked as non-lazy, generate an indirect call
2260 // which loads from the GOT directly. This avoids runtime overhead
2261 // at the cost of eager binding (and one extra byte of encoding).
2262 OpFlags = X86II::MO_GOTPCREL;
2263 WrapperKind = X86ISD::WrapperRIP;
2267 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
2268 G->getOffset(), OpFlags);
2270 // Add a wrapper if needed.
2271 if (WrapperKind != ISD::DELETED_NODE)
2272 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
2273 // Add extra indirection if needed.
2275 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
2276 MachinePointerInfo::getGOT(),
2279 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2280 unsigned char OpFlags = 0;
2282 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
2283 // external symbols should go through the PLT.
2284 if (Subtarget->isTargetELF() &&
2285 getTargetMachine().getRelocationModel() == Reloc::PIC_) {
2286 OpFlags = X86II::MO_PLT;
2287 } else if (Subtarget->isPICStyleStubAny() &&
2288 (!Subtarget->getTargetTriple().isMacOSX() ||
2289 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2290 // PC-relative references to external symbols should go through $stub,
2291 // unless we're building with the leopard linker or later, which
2292 // automatically synthesizes these stubs.
2293 OpFlags = X86II::MO_DARWIN_STUB;
2296 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
2300 // Returns a chain & a flag for retval copy to use.
2301 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2302 SmallVector<SDValue, 8> Ops;
2304 if (!IsSibcall && isTailCall) {
2305 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2306 DAG.getIntPtrConstant(0, true), InFlag);
2307 InFlag = Chain.getValue(1);
2310 Ops.push_back(Chain);
2311 Ops.push_back(Callee);
2314 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
2316 // Add argument registers to the end of the list so that they are known live
2318 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2319 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2320 RegsToPass[i].second.getValueType()));
2322 // Add an implicit use GOT pointer in EBX.
2323 if (!isTailCall && Subtarget->isPICStyleGOT())
2324 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
2326 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions.
2327 if (Is64Bit && isVarArg && !IsWin64)
2328 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
2330 if (InFlag.getNode())
2331 Ops.push_back(InFlag);
2335 //// If this is the first return lowered for this function, add the regs
2336 //// to the liveout set for the function.
2337 // This isn't right, although it's probably harmless on x86; liveouts
2338 // should be computed from returns not tail calls. Consider a void
2339 // function making a tail call to a function returning int.
2340 return DAG.getNode(X86ISD::TC_RETURN, dl,
2341 NodeTys, &Ops[0], Ops.size());
2344 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
2345 InFlag = Chain.getValue(1);
2347 // Create the CALLSEQ_END node.
2348 unsigned NumBytesForCalleeToPush;
2349 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt))
2350 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
2351 else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
2352 // If this is a call to a struct-return function, the callee
2353 // pops the hidden struct pointer, so we have to push it back.
2354 // This is common for Darwin/X86, Linux & Mingw32 targets.
2355 NumBytesForCalleeToPush = 4;
2357 NumBytesForCalleeToPush = 0; // Callee pops nothing.
2359 // Returns a flag for retval copy to use.
2361 Chain = DAG.getCALLSEQ_END(Chain,
2362 DAG.getIntPtrConstant(NumBytes, true),
2363 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2366 InFlag = Chain.getValue(1);
2369 // Handle result values, copying them out of physregs into vregs that we
2371 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2372 Ins, dl, DAG, InVals);
2376 //===----------------------------------------------------------------------===//
2377 // Fast Calling Convention (tail call) implementation
2378 //===----------------------------------------------------------------------===//
2380 // Like stdcall, the callee cleans up the arguments, except that ECX is
2381 // reserved for storing the address of the tail-called function. Only 2 registers are
2382 // free for argument passing (inreg). Tail call optimization is performed
2384 // * tailcallopt is enabled
2385 // * caller/callee are fastcc
2386 // On X86_64 architecture with GOT-style position independent code only local
2387 // (within module) calls are supported at the moment.
2388 // To keep the stack aligned according to the platform ABI, the function
2389 // GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
2390 // of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
2391 // If a tail called function callee has more arguments than the caller the
2392 // caller needs to make sure that there is room to move the RETADDR to. This is
2393 // achieved by reserving an area the size of the argument delta right after the
2394 // original RETADDR, but before the saved frame pointer or the spilled registers
2395 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
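// A sketch of the resulting layout for that example (illustrative; higher
// addresses at the top):
//   incoming arguments (arg1, arg2)
//   RETADDR                               <- original return address
//   [ RETADDR move area, argument-delta bytes ]
//   (possibly saved EBP)
//   callee-saved registers, spills, locals ...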
2407 /// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12 so it
2408 /// stays aligned once the return address slot is added (16-byte alignment case).
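/// Worked example (illustrative, 32-bit): with a 16-byte stack alignment and a
/// 4-byte return-address slot, StackSize = 20 has (Offset & 15) = 4 <= 12, so
/// we add 12 - 4 = 8 and return 28 = 16 + 12; pushing the 4-byte return
/// address then brings the total back to a 16-byte multiple.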
2410 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2411 SelectionDAG& DAG) const {
2412 MachineFunction &MF = DAG.getMachineFunction();
2413 const TargetMachine &TM = MF.getTarget();
2414 const TargetFrameLowering &TFI = *TM.getFrameLowering();
2415 unsigned StackAlignment = TFI.getStackAlignment();
2416 uint64_t AlignMask = StackAlignment - 1;
2417 int64_t Offset = StackSize;
2418 uint64_t SlotSize = TD->getPointerSize();
2419 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
2420 // Offset is already below the (StackAlignment - SlotSize) boundary; just add the difference.
2421 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
2423 // Mask out the lower bits, then add one full StackAlignment plus (StackAlignment - SlotSize).
2424 Offset = ((~AlignMask) & Offset) + StackAlignment +
2425 (StackAlignment-SlotSize);
2430 /// MatchingStackOffset - Return true if the given stack call argument is
2431 /// already available in the same position (relatively) of the caller's
2432 /// incoming argument stack.
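/// (Illustrative consequence: an outgoing argument that is simply reloaded
/// from the caller's own fixed stack slot at the same offset and size can be
/// left in place, so a sibcall does not need to copy it.)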
2434 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2435 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
2436 const X86InstrInfo *TII) {
2437 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
2439 if (Arg.getOpcode() == ISD::CopyFromReg) {
2440 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2441 if (!TargetRegisterInfo::isVirtualRegister(VR))
2443 MachineInstr *Def = MRI->getVRegDef(VR);
2446 if (!Flags.isByVal()) {
2447 if (!TII->isLoadFromStackSlot(Def, FI))
2450 unsigned Opcode = Def->getOpcode();
2451 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
2452 Def->getOperand(1).isFI()) {
2453 FI = Def->getOperand(1).getIndex();
2454 Bytes = Flags.getByValSize();
2458 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2459 if (Flags.isByVal())
2460 // ByVal argument is passed in as a pointer but it's now being
2461 // dereferenced. e.g.
2462 // define @foo(%struct.X* %A) {
2463 // tail call @bar(%struct.X* byval %A)
2466 SDValue Ptr = Ld->getBasePtr();
2467 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2470 FI = FINode->getIndex();
2471 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
2472 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
2473 FI = FINode->getIndex();
2474 Bytes = Flags.getByValSize();
2478 assert(FI != INT_MAX);
2479 if (!MFI->isFixedObjectIndex(FI))
2481 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
2484 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2485 /// for tail call optimization. Targets which want to do tail call
2486 /// optimization should implement this function.
2488 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2489 CallingConv::ID CalleeCC,
2491 bool isCalleeStructRet,
2492 bool isCallerStructRet,
2493 const SmallVectorImpl<ISD::OutputArg> &Outs,
2494 const SmallVectorImpl<SDValue> &OutVals,
2495 const SmallVectorImpl<ISD::InputArg> &Ins,
2496 SelectionDAG& DAG) const {
2497 if (!IsTailCallConvention(CalleeCC) &&
2498 CalleeCC != CallingConv::C)
2501 // If -tailcallopt is specified, make fastcc functions tail-callable.
2502 const MachineFunction &MF = DAG.getMachineFunction();
2503 const Function *CallerF = DAG.getMachineFunction().getFunction();
2504 CallingConv::ID CallerCC = CallerF->getCallingConv();
2505 bool CCMatch = CallerCC == CalleeCC;
2507 if (GuaranteedTailCallOpt) {
2508 if (IsTailCallConvention(CalleeCC) && CCMatch)
2513 // Look for obvious safe cases to perform tail call optimization that do not
2514 // require ABI changes. This is what gcc calls sibcall.
2516 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
2517 // emit a special epilogue.
2518 if (RegInfo->needsStackRealignment(MF))
2521 // Also avoid sibcall optimization if either caller or callee uses struct
2522 // return semantics.
2523 if (isCalleeStructRet || isCallerStructRet)
2526 // An stdcall caller is expected to clean up its arguments; the callee
2527 // isn't going to do that.
2528 if (!CCMatch && CallerCC==CallingConv::X86_StdCall)
2531 // Do not sibcall optimize vararg calls unless all arguments are passed via
2533 if (isVarArg && !Outs.empty()) {
2535 // Optimizing for varargs on Win64 is unlikely to be safe without
2536 // additional testing.
2537 if (Subtarget->isTargetWin64())
2540 SmallVector<CCValAssign, 16> ArgLocs;
2541 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
2542 getTargetMachine(), ArgLocs, *DAG.getContext());
2544 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2545 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
2546 if (!ArgLocs[i].isRegLoc())
2550 // If the call result is in ST0 / ST1, it needs to be popped off the x87 stack.
2551 // Therefore if it's not used by the caller it is not safe to optimize this into
2553 bool Unused = false;
2554 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
2561 SmallVector<CCValAssign, 16> RVLocs;
2562 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
2563 getTargetMachine(), RVLocs, *DAG.getContext());
2564 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2565 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2566 CCValAssign &VA = RVLocs[i];
2567 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
2572 // If the calling conventions do not match, then we'd better make sure the
2573 // results are returned in the same way as what the caller expects.
2575 SmallVector<CCValAssign, 16> RVLocs1;
2576 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
2577 getTargetMachine(), RVLocs1, *DAG.getContext());
2578 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
2580 SmallVector<CCValAssign, 16> RVLocs2;
2581 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
2582 getTargetMachine(), RVLocs2, *DAG.getContext());
2583 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
2585 if (RVLocs1.size() != RVLocs2.size())
2587 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
2588 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
2590 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
2592 if (RVLocs1[i].isRegLoc()) {
2593 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
2596 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
2602 // If the callee takes no arguments then go on to check the results of the
2604 if (!Outs.empty()) {
2605 // Check if stack adjustment is needed. For now, do not do this if any
2606 // argument is passed on the stack.
2607 SmallVector<CCValAssign, 16> ArgLocs;
2608 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
2609 getTargetMachine(), ArgLocs, *DAG.getContext());
2611 // Allocate shadow area for Win64
2612 if (Subtarget->isTargetWin64()) {
2613 CCInfo.AllocateStack(32, 8);
2616 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2617 if (CCInfo.getNextStackOffset()) {
2618 MachineFunction &MF = DAG.getMachineFunction();
2619 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
2622 // Check if the arguments are already laid out in the right way as
2623 // the caller's fixed stack objects.
2624 MachineFrameInfo *MFI = MF.getFrameInfo();
2625 const MachineRegisterInfo *MRI = &MF.getRegInfo();
2626 const X86InstrInfo *TII =
2627 ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
2628 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2629 CCValAssign &VA = ArgLocs[i];
2630 SDValue Arg = OutVals[i];
2631 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2632 if (VA.getLocInfo() == CCValAssign::Indirect)
2634 if (!VA.isRegLoc()) {
2635 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2642 // If the tailcall address may be in a register, then make sure it's
2643 // possible to register allocate for it. In 32-bit, the call address can
2644 // only target EAX, EDX, or ECX since the tail call must be scheduled after
2645 // callee-saved registers are restored. These happen to be the same
2646 // registers used to pass 'inreg' arguments so watch out for those.
2647 if (!Subtarget->is64Bit() &&
2648 !isa<GlobalAddressSDNode>(Callee) &&
2649 !isa<ExternalSymbolSDNode>(Callee)) {
2650 unsigned NumInRegs = 0;
2651 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2652 CCValAssign &VA = ArgLocs[i];
2655 unsigned Reg = VA.getLocReg();
2658 case X86::EAX: case X86::EDX: case X86::ECX:
2659 if (++NumInRegs == 3)
2671 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
2672 return X86::createFastISel(funcInfo);
2676 //===----------------------------------------------------------------------===//
2677 // Other Lowering Hooks
2678 //===----------------------------------------------------------------------===//
2680 static bool MayFoldLoad(SDValue Op) {
2681 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
2684 static bool MayFoldIntoStore(SDValue Op) {
2685 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
2688 static bool isTargetShuffle(unsigned Opcode) {
2690 default: return false;
2691 case X86ISD::PSHUFD:
2692 case X86ISD::PSHUFHW:
2693 case X86ISD::PSHUFLW:
2694 case X86ISD::SHUFPD:
2695 case X86ISD::PALIGN:
2696 case X86ISD::SHUFPS:
2697 case X86ISD::MOVLHPS:
2698 case X86ISD::MOVLHPD:
2699 case X86ISD::MOVHLPS:
2700 case X86ISD::MOVLPS:
2701 case X86ISD::MOVLPD:
2702 case X86ISD::MOVSHDUP:
2703 case X86ISD::MOVSLDUP:
2704 case X86ISD::MOVDDUP:
2707 case X86ISD::UNPCKLPS:
2708 case X86ISD::UNPCKLPD:
2709 case X86ISD::VUNPCKLPSY:
2710 case X86ISD::VUNPCKLPDY:
2711 case X86ISD::PUNPCKLWD:
2712 case X86ISD::PUNPCKLBW:
2713 case X86ISD::PUNPCKLDQ:
2714 case X86ISD::PUNPCKLQDQ:
2715 case X86ISD::UNPCKHPS:
2716 case X86ISD::UNPCKHPD:
2717 case X86ISD::VUNPCKHPSY:
2718 case X86ISD::VUNPCKHPDY:
2719 case X86ISD::PUNPCKHWD:
2720 case X86ISD::PUNPCKHBW:
2721 case X86ISD::PUNPCKHDQ:
2722 case X86ISD::PUNPCKHQDQ:
2723 case X86ISD::VPERMILPS:
2724 case X86ISD::VPERMILPSY:
2725 case X86ISD::VPERMILPD:
2726 case X86ISD::VPERMILPDY:
2732 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2733 SDValue V1, SelectionDAG &DAG) {
2735 default: llvm_unreachable("Unknown x86 shuffle node");
2736 case X86ISD::MOVSHDUP:
2737 case X86ISD::MOVSLDUP:
2738 case X86ISD::MOVDDUP:
2739 return DAG.getNode(Opc, dl, VT, V1);
2745 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2746 SDValue V1, unsigned TargetMask, SelectionDAG &DAG) {
2748 default: llvm_unreachable("Unknown x86 shuffle node");
2749 case X86ISD::PSHUFD:
2750 case X86ISD::PSHUFHW:
2751 case X86ISD::PSHUFLW:
2752 case X86ISD::VPERMILPS:
2753 case X86ISD::VPERMILPSY:
2754 case X86ISD::VPERMILPD:
2755 case X86ISD::VPERMILPDY:
2756 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
2762 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2763 SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
2765 default: llvm_unreachable("Unknown x86 shuffle node");
2766 case X86ISD::PALIGN:
2767 case X86ISD::SHUFPD:
2768 case X86ISD::SHUFPS:
2769 return DAG.getNode(Opc, dl, VT, V1, V2,
2770 DAG.getConstant(TargetMask, MVT::i8));
2775 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
2776 SDValue V1, SDValue V2, SelectionDAG &DAG) {
2778 default: llvm_unreachable("Unknown x86 shuffle node");
2779 case X86ISD::MOVLHPS:
2780 case X86ISD::MOVLHPD:
2781 case X86ISD::MOVHLPS:
2782 case X86ISD::MOVLPS:
2783 case X86ISD::MOVLPD:
2786 case X86ISD::UNPCKLPS:
2787 case X86ISD::UNPCKLPD:
2788 case X86ISD::VUNPCKLPSY:
2789 case X86ISD::VUNPCKLPDY:
2790 case X86ISD::PUNPCKLWD:
2791 case X86ISD::PUNPCKLBW:
2792 case X86ISD::PUNPCKLDQ:
2793 case X86ISD::PUNPCKLQDQ:
2794 case X86ISD::UNPCKHPS:
2795 case X86ISD::UNPCKHPD:
2796 case X86ISD::VUNPCKHPSY:
2797 case X86ISD::VUNPCKHPDY:
2798 case X86ISD::PUNPCKHWD:
2799 case X86ISD::PUNPCKHBW:
2800 case X86ISD::PUNPCKHDQ:
2801 case X86ISD::PUNPCKHQDQ:
2802 return DAG.getNode(Opc, dl, VT, V1, V2);
2807 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
2808 MachineFunction &MF = DAG.getMachineFunction();
2809 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2810 int ReturnAddrIndex = FuncInfo->getRAIndex();
2812 if (ReturnAddrIndex == 0) {
2813 // Set up a frame object for the return address.
2814 uint64_t SlotSize = TD->getPointerSize();
2815 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
2817 FuncInfo->setRAIndex(ReturnAddrIndex);
2820 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
2824 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
2825 bool hasSymbolicDisplacement) {
2826 // Offset should fit into 32 bit immediate field.
2827 if (!isInt<32>(Offset))
2830 // If we don't have a symbolic displacement - we don't have any extra
2832 if (!hasSymbolicDisplacement)
2835 // FIXME: Some tweaks might be needed for medium code model.
2836 if (M != CodeModel::Small && M != CodeModel::Kernel)
2839 // For the small code model we assume that the last object ends at least 16MB
2840 // below the 31-bit boundary. We can also accept fairly large negative constants,
2841 // knowing that all objects are in the positive half of the address space.
2842 if (M == CodeModel::Small && Offset < 16*1024*1024)
2845 // For the kernel code model we know that all objects reside in the negative
2846 // half of the 32-bit address space. We must reject negative offsets, since they
2847 // may push an address out of range, but we can accept fairly large positive ones.
2848 if (M == CodeModel::Kernel && Offset > 0)
2854 /// isCalleePop - Determines whether the callee is required to pop its
2855 /// own arguments. Callee pop is necessary to support tail calls.
2856 bool X86::isCalleePop(CallingConv::ID CallingConv,
2857 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
2861 switch (CallingConv) {
2864 case CallingConv::X86_StdCall:
2866 case CallingConv::X86_FastCall:
2868 case CallingConv::X86_ThisCall:
2870 case CallingConv::Fast:
2872 case CallingConv::GHC:
2877 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
2878 /// specific condition code, returning the condition code and the LHS/RHS of the
2879 /// comparison to make.
2880 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
2881 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
2883 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2884 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
2885 // X > -1 -> X == 0, jump !sign.
2886 RHS = DAG.getConstant(0, RHS.getValueType());
2887 return X86::COND_NS;
2888 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
2889 // X < 0 -> X == 0, jump on sign.
2891 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
2893 RHS = DAG.getConstant(0, RHS.getValueType());
2894 return X86::COND_LE;
2898 switch (SetCCOpcode) {
2899 default: llvm_unreachable("Invalid integer condition!");
2900 case ISD::SETEQ: return X86::COND_E;
2901 case ISD::SETGT: return X86::COND_G;
2902 case ISD::SETGE: return X86::COND_GE;
2903 case ISD::SETLT: return X86::COND_L;
2904 case ISD::SETLE: return X86::COND_LE;
2905 case ISD::SETNE: return X86::COND_NE;
2906 case ISD::SETULT: return X86::COND_B;
2907 case ISD::SETUGT: return X86::COND_A;
2908 case ISD::SETULE: return X86::COND_BE;
2909 case ISD::SETUGE: return X86::COND_AE;
2913 // First determine if it is required or is profitable to flip the operands.
2915 // If LHS is a foldable load, but RHS is not, flip the condition.
2916 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
2917 !ISD::isNON_EXTLoad(RHS.getNode())) {
2918 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
2919 std::swap(LHS, RHS);
2922 switch (SetCCOpcode) {
2928 std::swap(LHS, RHS);
2932 // On a floating point condition, the flags (ZF | PF | CF) are set as follows:
2934 // 0 | 0 | 0 | X > Y
2935 // 0 | 0 | 1 | X < Y
2936 // 1 | 0 | 0 | X == Y
2937 // 1 | 1 | 1 | unordered
2938 switch (SetCCOpcode) {
2939 default: llvm_unreachable("Condcode should be pre-legalized away");
2941 case ISD::SETEQ: return X86::COND_E;
2942 case ISD::SETOLT: // flipped
2944 case ISD::SETGT: return X86::COND_A;
2945 case ISD::SETOLE: // flipped
2947 case ISD::SETGE: return X86::COND_AE;
2948 case ISD::SETUGT: // flipped
2950 case ISD::SETLT: return X86::COND_B;
2951 case ISD::SETUGE: // flipped
2953 case ISD::SETLE: return X86::COND_BE;
2955 case ISD::SETNE: return X86::COND_NE;
2956 case ISD::SETUO: return X86::COND_P;
2957 case ISD::SETO: return X86::COND_NP;
2959 case ISD::SETUNE: return X86::COND_INVALID;
2963 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
2964 /// code. Current x86 isa includes the following FP cmov instructions:
2965 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2966 static bool hasFPCMov(unsigned X86CC) {
2982 /// isFPImmLegal - Returns true if the target can instruction select the
2983 /// specified FP immediate natively. If false, the legalizer will
2984 /// materialize the FP immediate as a load from a constant pool.
2985 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
2986 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
2987 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
2993 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
2994 /// the specified range [Low, Hi).
2995 static bool isUndefOrInRange(int Val, int Low, int Hi) {
2996 return (Val < 0) || (Val >= Low && Val < Hi);
2999 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3000 /// specified value.
3001 static bool isUndefOrEqual(int Val, int CmpVal) {
3002 if (Val < 0 || Val == CmpVal)
3007 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3008 /// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
3009 /// the second operand.
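/// For example (illustrative), the v4i32 mask <2, 3, 2, 3> is a valid PSHUFD
/// mask since every element selects from the first operand.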
3010 static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3011 if (VT == MVT::v4f32 || VT == MVT::v4i32 )
3012 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
3013 if (VT == MVT::v2f64 || VT == MVT::v2i64)
3014 return (Mask[0] < 2 && Mask[1] < 2);
3018 bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
3019 SmallVector<int, 8> M;
3021 return ::isPSHUFDMask(M, N->getValueType(0));
3024 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3025 /// is suitable for input to PSHUFHW.
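/// An illustrative example: for v8i16 the mask <0,1,2,3, 7,6,5,4> is a valid
/// PSHUFHW mask (lower quadword copied in order, upper quadword permuted
/// within elements 4-7), whereas <4,5,6,7, 0,1,2,3> is not.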
3026 static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3027 if (VT != MVT::v8i16)
3030 // Lower quadword copied in order or undef.
3031 for (int i = 0; i != 4; ++i)
3032 if (Mask[i] >= 0 && Mask[i] != i)
3035 // Upper quadword shuffled.
3036 for (int i = 4; i != 8; ++i)
3037 if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
3043 bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
3044 SmallVector<int, 8> M;
3046 return ::isPSHUFHWMask(M, N->getValueType(0));
3049 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3050 /// is suitable for input to PSHUFLW.
3051 static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3052 if (VT != MVT::v8i16)
3055 // Upper quadword copied in order.
3056 for (int i = 4; i != 8; ++i)
3057 if (Mask[i] >= 0 && Mask[i] != i)
3060 // Lower quadword shuffled.
3061 for (int i = 0; i != 4; ++i)
3068 bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
3069 SmallVector<int, 8> M;
3071 return ::isPSHUFLWMask(M, N->getValueType(0));
3074 /// isPALIGNRMask - Return true if the node specifies a shuffle of elements that
3075 /// is suitable for input to PALIGNR.
3076 static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
3078 int i, e = VT.getVectorNumElements();
3079 if (VT.getSizeInBits() != 128 && VT.getSizeInBits() != 64)
3082 // Do not handle v2i64 / v2f64 shuffles with palignr.
3083 if (e < 4 || !hasSSSE3)
3086 for (i = 0; i != e; ++i)
3090 // All undef, not a palignr.
3094 // Make sure we're shifting in the right direction.
3098 int s = Mask[i] - i;
3100 // Check the rest of the elements to see if they are consecutive.
3101 for (++i; i != e; ++i) {
3103 if (m >= 0 && m != s+i)
3109 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
3110 /// specifies a shuffle of elements that is suitable for input to SHUFP*.
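/// For example (illustrative), the v4f32 mask <0, 1, 4, 5> is a valid SHUFP
/// mask: the low half of the result selects from the first vector and the
/// high half selects from the second.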
3111 static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3112 int NumElems = VT.getVectorNumElements();
3113 if (NumElems != 2 && NumElems != 4)
3116 int Half = NumElems / 2;
3117 for (int i = 0; i < Half; ++i)
3118 if (!isUndefOrInRange(Mask[i], 0, NumElems))
3120 for (int i = Half; i < NumElems; ++i)
3121 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
3127 bool X86::isSHUFPMask(ShuffleVectorSDNode *N) {
3128 SmallVector<int, 8> M;
3130 return ::isSHUFPMask(M, N->getValueType(0));
3133 /// isCommutedSHUFP - Returns true if the shuffle mask is exactly
3134 /// the reverse of what x86 shuffles want. x86 shuffles require the lower
3135 /// half elements to come from vector 1 (which would equal the dest.) and
3136 /// the upper half to come from vector 2.
3137 static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3138 int NumElems = VT.getVectorNumElements();
3140 if (NumElems != 2 && NumElems != 4)
3143 int Half = NumElems / 2;
3144 for (int i = 0; i < Half; ++i)
3145 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
3147 for (int i = Half; i < NumElems; ++i)
3148 if (!isUndefOrInRange(Mask[i], 0, NumElems))
3153 static bool isCommutedSHUFP(ShuffleVectorSDNode *N) {
3154 SmallVector<int, 8> M;
3156 return isCommutedSHUFPMask(M, N->getValueType(0));
3159 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
3160 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
3161 bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
3162 if (N->getValueType(0).getVectorNumElements() != 4)
3165 // Expect elt0 == 6, elt1 == 7, elt2 == 2, elt3 == 3
3166 return isUndefOrEqual(N->getMaskElt(0), 6) &&
3167 isUndefOrEqual(N->getMaskElt(1), 7) &&
3168 isUndefOrEqual(N->getMaskElt(2), 2) &&
3169 isUndefOrEqual(N->getMaskElt(3), 3);
3172 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
3173 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
3175 bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
3176 unsigned NumElems = N->getValueType(0).getVectorNumElements();
3181 return isUndefOrEqual(N->getMaskElt(0), 2) &&
3182 isUndefOrEqual(N->getMaskElt(1), 3) &&
3183 isUndefOrEqual(N->getMaskElt(2), 2) &&
3184 isUndefOrEqual(N->getMaskElt(3), 3);
3187 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
3188 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
3189 bool X86::isMOVLPMask(ShuffleVectorSDNode *N) {
3190 unsigned NumElems = N->getValueType(0).getVectorNumElements();
3192 if (NumElems != 2 && NumElems != 4)
3195 for (unsigned i = 0; i < NumElems/2; ++i)
3196 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems))
3199 for (unsigned i = NumElems/2; i < NumElems; ++i)
3200 if (!isUndefOrEqual(N->getMaskElt(i), i))
3206 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
3207 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
3208 bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) {
3209 unsigned NumElems = N->getValueType(0).getVectorNumElements();
3211 if ((NumElems != 2 && NumElems != 4)
3212 || N->getValueType(0).getSizeInBits() > 128)
3215 for (unsigned i = 0; i < NumElems/2; ++i)
3216 if (!isUndefOrEqual(N->getMaskElt(i), i))
3219 for (unsigned i = 0; i < NumElems/2; ++i)
3220 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems))
3226 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
3227 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
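/// For example, a valid v4i32 UNPCKL mask is <0,4,1,5>; for 256-bit types the
/// pattern repeats per 128-bit lane, e.g. <0,8,1,9,4,12,5,13> for v8i32.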
3228 static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
3229 bool V2IsSplat = false) {
3230 int NumElts = VT.getVectorNumElements();
3232 assert((VT.is128BitVector() || VT.is256BitVector()) &&
3233 "Unsupported vector type for unpckh");
3235 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
3238 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
3239 // independently on 128-bit lanes.
3240 unsigned NumLanes = VT.getSizeInBits()/128;
3241 unsigned NumLaneElts = NumElts/NumLanes;
3244 unsigned End = NumLaneElts;
3245 for (unsigned s = 0; s < NumLanes; ++s) {
3246 for (unsigned i = Start, j = s * NumLaneElts;
3250 int BitI1 = Mask[i+1];
3251 if (!isUndefOrEqual(BitI, j))
3254 if (!isUndefOrEqual(BitI1, NumElts))
3257 if (!isUndefOrEqual(BitI1, j + NumElts))
3261 // Process the next 128 bits.
3262 Start += NumLaneElts;
3269 bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
3270 SmallVector<int, 8> M;
3272 return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat);
3275 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
3276 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
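/// For example, a valid v4i32 UNPCKH mask is <2,6,3,7>; for 256-bit types the
/// pattern repeats per 128-bit lane, e.g. <2,10,3,11,6,14,7,15> for v8i32.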
3277 static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
3278 bool V2IsSplat = false) {
3279 int NumElts = VT.getVectorNumElements();
3281 assert((VT.is128BitVector() || VT.is256BitVector()) &&
3282 "Unsupported vector type for unpckh");
3284 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
3287 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
3288 // independently on 128-bit lanes.
3289 unsigned NumLanes = VT.getSizeInBits()/128;
3290 unsigned NumLaneElts = NumElts/NumLanes;
3293 unsigned End = NumLaneElts;
3294 for (unsigned l = 0; l != NumLanes; ++l) {
3295 for (unsigned i = Start, j = (l*NumLaneElts)+NumLaneElts/2;
3296 i != End; i += 2, ++j) {
3298 int BitI1 = Mask[i+1];
3299 if (!isUndefOrEqual(BitI, j))
3302 if (isUndefOrEqual(BitI1, NumElts))
3305 if (!isUndefOrEqual(BitI1, j+NumElts))
3309 // Process the next 128 bits.
3310 Start += NumLaneElts;
3316 bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
3317 SmallVector<int, 8> M;
3319 return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat);
3322 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
3323 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
3325 static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
3326 int NumElems = VT.getVectorNumElements();
3327 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
3330 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
3331 // independently on 128-bit lanes.
3332 unsigned NumLanes = VT.getSizeInBits() / 128;
3333 unsigned NumLaneElts = NumElems / NumLanes;
3335 for (unsigned s = 0; s < NumLanes; ++s) {
3336 for (unsigned i = s * NumLaneElts, j = s * NumLaneElts;
3337 i != NumLaneElts * (s + 1);
3340 int BitI1 = Mask[i+1];
3342 if (!isUndefOrEqual(BitI, j))
3344 if (!isUndefOrEqual(BitI1, j))
3352 bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) {
3353 SmallVector<int, 8> M;
3355 return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0));
3358 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
3359 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
3361 static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
3362 int NumElems = VT.getVectorNumElements();
3363 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
3366 for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
3368 int BitI1 = Mask[i+1];
3369 if (!isUndefOrEqual(BitI, j))
3371 if (!isUndefOrEqual(BitI1, j))
3377 bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) {
3378 SmallVector<int, 8> M;
3380 return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0));
3383 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
3384 /// specifies a shuffle of elements that is suitable for input to MOVSS,
3385 /// MOVSD, and MOVD, i.e. setting the lowest element.
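/// For example, for v4i32 the mask <4,1,2,3> matches: element 0 comes from
/// the second operand and the remaining elements are the identity from the
/// first operand.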
3386 static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
3387 if (VT.getVectorElementType().getSizeInBits() < 32)
3390 int NumElts = VT.getVectorNumElements();
3392 if (!isUndefOrEqual(Mask[0], NumElts))
3395 for (int i = 1; i < NumElts; ++i)
3396 if (!isUndefOrEqual(Mask[i], i))
3402 bool X86::isMOVLMask(ShuffleVectorSDNode *N) {
3403 SmallVector<int, 8> M;
3405 return ::isMOVLMask(M, N->getValueType(0));
3408 /// isVPERMILPDMask - Return true if the specified VECTOR_SHUFFLE operand
3409 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
3410 /// Note that VPERMIL mask matching is different depending on whether the
3411 /// underlying type is 32 or 64-bit. In VPERMILPS the high half of the mask should point
3412 /// to the same elements of the low, but to the higher half of the source.
3413 /// In VPERMILPD the two lanes could be shuffled independently of each other
3414 /// with the same restriction that lanes can't be crossed.
3415 static bool isVPERMILPDMask(const SmallVectorImpl<int> &Mask, EVT VT,
3416 const X86Subtarget *Subtarget) {
3417 int NumElts = VT.getVectorNumElements();
3418 int NumLanes = VT.getSizeInBits()/128;
3420 if (!Subtarget->hasAVX())
3423 // Match any permutation of 128-bit vector with 64-bit types
3424 if (NumLanes == 1 && NumElts != 2)
3427 // Only match 256-bit with 64-bit element types
3428 if (VT.getSizeInBits() == 256 && NumElts != 4)
3431 // The mask on the high lane is independent of the low. Both can match
3432 // any element inside its own lane, but can't cross.
3433 int LaneSize = NumElts/NumLanes;
3434 for (int l = 0; l < NumLanes; ++l)
3435 for (int i = l*LaneSize; i < LaneSize*(l+1); ++i) {
3436 int LaneStart = l*LaneSize;
3437 if (!isUndefOrInRange(Mask[i], LaneStart, LaneStart+LaneSize))
3444 /// isVPERMILPSMask - Return true if the specified VECTOR_SHUFFLE operand
3445 /// specifies a shuffle of elements that is suitable for input to VPERMILPS*.
3446 /// Note that VPERMIL mask matching is different depending on whether the
3447 /// underlying type is 32 or 64-bit. In VPERMILPS the high half of the mask should point
3448 /// to the same elements of the low, but to the higher half of the source.
3449 /// In VPERMILPD the two lanes could be shuffled independently of each other
3450 /// with the same restriction that lanes can't be crossed.
3451 static bool isVPERMILPSMask(const SmallVectorImpl<int> &Mask, EVT VT,
3452 const X86Subtarget *Subtarget) {
3453 unsigned NumElts = VT.getVectorNumElements();
3454 unsigned NumLanes = VT.getSizeInBits()/128;
3456 if (!Subtarget->hasAVX())
3459 // Match any permutation of 128-bit vector with 32-bit types
3460 if (NumLanes == 1 && NumElts != 4)
3463 // Only match 256-bit with 32-bit element types
3464 if (VT.getSizeInBits() == 256 && NumElts != 8)
3467 // The mask on the high lane should be the same as the low. Actually,
3468 // they can differ if any of the corresponding indices in a lane is undef.
3469 int LaneSize = NumElts/NumLanes;
3470 for (int i = 0; i < LaneSize; ++i) {
3471 int HighElt = i+LaneSize;
3472 if (Mask[i] < 0 || Mask[HighElt] < 0)
3474 if (Mask[HighElt]-Mask[i] != LaneSize)
3481 /// getShuffleVPERMILPSImmediate - Return the appropriate immediate to shuffle
3482 /// the specified VECTOR_SHUFFLE mask with VPERMILPS* instructions.
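/// Only the first 128-bit lane is encoded, two bits per element. For example,
/// assuming a mask already accepted by isVPERMILPSMask, <1,0,3,2,5,4,7,6> on
/// v8f32 yields the immediate 0xB1 (binary 10110001).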
3483 static unsigned getShuffleVPERMILPSImmediate(SDNode *N) {
3484 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3485 EVT VT = SVOp->getValueType(0);
3487 int NumElts = VT.getVectorNumElements();
3488 int NumLanes = VT.getSizeInBits()/128;
3491 for (int i = 0; i < NumElts/NumLanes /* lane size */; ++i)
3492 Mask |= SVOp->getMaskElt(i) << (i*2);
3497 /// getShuffleVPERMILPDImmediate - Return the appropriate immediate to shuffle
3498 /// the specified VECTOR_SHUFFLE mask with VPERMILPD* instructions.
3499 static unsigned getShuffleVPERMILPDImmediate(SDNode *N) {
3500 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3501 EVT VT = SVOp->getValueType(0);
3503 int NumElts = VT.getVectorNumElements();
3504 int NumLanes = VT.getSizeInBits()/128;
3507 int LaneSize = NumElts/NumLanes;
3508 for (int l = 0; l < NumLanes; ++l)
3509 for (int i = l*LaneSize; i < LaneSize*(l+1); ++i)
3510 Mask |= (SVOp->getMaskElt(i)-l*LaneSize) << i;
3515 /// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
3516 /// of what x86 movss wants. X86 movss requires the lowest element to be the lowest
3517 /// element of vector 2 and the other elements to come from vector 1 in order.
3518 static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
3519 bool V2IsSplat = false, bool V2IsUndef = false) {
3520 int NumOps = VT.getVectorNumElements();
3521 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
3524 if (!isUndefOrEqual(Mask[0], 0))
3527 for (int i = 1; i < NumOps; ++i)
3528 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
3529 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
3530 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
3536 static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false,
3537 bool V2IsUndef = false) {
3538 SmallVector<int, 8> M;
3540 return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef);
3543 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3544 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
3545 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
3546 bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N,
3547 const X86Subtarget *Subtarget) {
3548 if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
3551 // The second vector must be undef
3552 if (N->getOperand(1).getOpcode() != ISD::UNDEF)
3555 EVT VT = N->getValueType(0);
3556 unsigned NumElems = VT.getVectorNumElements();
3558 if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
3559 (VT.getSizeInBits() == 256 && NumElems != 8))
3562 // "i+1" is the value the indexed mask element must have
3563 for (unsigned i = 0; i < NumElems; i += 2)
3564 if (!isUndefOrEqual(N->getMaskElt(i), i+1) ||
3565 !isUndefOrEqual(N->getMaskElt(i+1), i+1))
3571 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3572 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
3573 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
3574 bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N,
3575 const X86Subtarget *Subtarget) {
3576 if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
3579 // The second vector must be undef
3580 if (N->getOperand(1).getOpcode() != ISD::UNDEF)
3583 EVT VT = N->getValueType(0);
3584 unsigned NumElems = VT.getVectorNumElements();
3586 if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
3587 (VT.getSizeInBits() == 256 && NumElems != 8))
3590 // "i" is the value the indexed mask element must have
3591 for (unsigned i = 0; i < NumElems; i += 2)
3592 if (!isUndefOrEqual(N->getMaskElt(i), i) ||
3593 !isUndefOrEqual(N->getMaskElt(i+1), i))
3599 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3600 /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
3601 bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
3602 int e = N->getValueType(0).getVectorNumElements() / 2;
3604 for (int i = 0; i < e; ++i)
3605 if (!isUndefOrEqual(N->getMaskElt(i), i))
3607 for (int i = 0; i < e; ++i)
3608 if (!isUndefOrEqual(N->getMaskElt(e+i), i))
3613 /// isVEXTRACTF128Index - Return true if the specified
3614 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
3615 /// suitable for input to VEXTRACTF128.
3616 bool X86::isVEXTRACTF128Index(SDNode *N) {
3617 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
3620 // The index should be aligned on a 128-bit boundary.
3622 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
3624 unsigned VL = N->getValueType(0).getVectorNumElements();
3625 unsigned VBits = N->getValueType(0).getSizeInBits();
3626 unsigned ElSize = VBits / VL;
3627 bool Result = (Index * ElSize) % 128 == 0;
3632 /// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR
3633 /// operand specifies a subvector insert that is suitable for input to
3635 bool X86::isVINSERTF128Index(SDNode *N) {
3636 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
3639 // The index should be aligned on a 128-bit boundary.
3641 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
3643 unsigned VL = N->getValueType(0).getVectorNumElements();
3644 unsigned VBits = N->getValueType(0).getSizeInBits();
3645 unsigned ElSize = VBits / VL;
3646 bool Result = (Index * ElSize) % 128 == 0;
3651 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
3652 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
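/// Elements are encoded two bits each (one bit for 2-element vectors), with
/// element 0 in the lowest bits. For example, the v4f32 mask <3,2,1,0> yields
/// the immediate 0x1B.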
3653 unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
3654 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3655 int NumOperands = SVOp->getValueType(0).getVectorNumElements();
3657 unsigned Shift = (NumOperands == 4) ? 2 : 1;
3659 for (int i = 0; i < NumOperands; ++i) {
3660 int Val = SVOp->getMaskElt(NumOperands-i-1);
3661 if (Val < 0) Val = 0;
3662 if (Val >= NumOperands) Val -= NumOperands;
3664 if (i != NumOperands - 1)
3670 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
3671 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
3672 unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
3673 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3675 // 8 nodes, but we only care about the last 4.
3676 for (unsigned i = 7; i >= 4; --i) {
3677 int Val = SVOp->getMaskElt(i);
3686 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
3687 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
3688 unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
3689 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3691 // 8 nodes, but we only care about the first 4.
3692 for (int i = 3; i >= 0; --i) {
3693 int Val = SVOp->getMaskElt(i);
3702 /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
3703 /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
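/// The immediate is a byte count: (first defined mask index minus its
/// position) times the element size in bytes. For example, the v8i16 mask
/// <2,3,4,5,6,7,8,9> yields 2 * 2 = 4.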
3704 unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
3705 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
3706 EVT VVT = N->getValueType(0);
3707 unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3;
3711 for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) {
3712 Val = SVOp->getMaskElt(i);
3716 assert(Val - i > 0 && "PALIGNR imm should be positive");
3717 return (Val - i) * EltSize;
3720 /// getExtractVEXTRACTF128Immediate - Return the appropriate immediate
3721 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
3723 unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) {
3724 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
3725 llvm_unreachable("Illegal extract subvector for VEXTRACTF128");
3728 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
3730 EVT VecVT = N->getOperand(0).getValueType();
3731 EVT ElVT = VecVT.getVectorElementType();
3733 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
3734 return Index / NumElemsPerChunk;
3737 /// getInsertVINSERTF128Immediate - Return the appropriate immediate
3738 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
3740 unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) {
3741 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
3742 llvm_unreachable("Illegal insert subvector for VINSERTF128");
3745 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
3747 EVT VecVT = N->getValueType(0);
3748 EVT ElVT = VecVT.getVectorElementType();
3750 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
3751 return Index / NumElemsPerChunk;
3754 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
3756 bool X86::isZeroNode(SDValue Elt) {
3757 return ((isa<ConstantSDNode>(Elt) &&
3758 cast<ConstantSDNode>(Elt)->isNullValue()) ||
3759 (isa<ConstantFPSDNode>(Elt) &&
3760 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
3763 /// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
3764 /// their permute mask.
3765 static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
3766 SelectionDAG &DAG) {
3767 EVT VT = SVOp->getValueType(0);
3768 unsigned NumElems = VT.getVectorNumElements();
3769 SmallVector<int, 8> MaskVec;
3771 for (unsigned i = 0; i != NumElems; ++i) {
3772 int idx = SVOp->getMaskElt(i);
3774 MaskVec.push_back(idx);
3775 else if (idx < (int)NumElems)
3776 MaskVec.push_back(idx + NumElems);
3778 MaskVec.push_back(idx - NumElems);
3780 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
3781 SVOp->getOperand(0), &MaskVec[0]);
3784 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
3785 /// the two vector operands have swapped position.
3786 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
3787 unsigned NumElems = VT.getVectorNumElements();
3788 for (unsigned i = 0; i != NumElems; ++i) {
3792 else if (idx < (int)NumElems)
3793 Mask[i] = idx + NumElems;
3795 Mask[i] = idx - NumElems;
3799 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
3800 /// match movhlps. The lower half elements should come from upper half of
3801 /// V1 (and in order), and the upper half elements should come from the upper
3802 /// half of V2 (and in order).
3803 static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) {
3804 if (Op->getValueType(0).getVectorNumElements() != 4)
3806 for (unsigned i = 0, e = 2; i != e; ++i)
3807 if (!isUndefOrEqual(Op->getMaskElt(i), i+2))
3809 for (unsigned i = 2; i != 4; ++i)
3810 if (!isUndefOrEqual(Op->getMaskElt(i), i+4))
3815 /// isScalarLoadToVector - Returns true if the node is a scalar load that
3816 /// is promoted to a vector. It also returns the LoadSDNode by reference if
3818 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
3819 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
3821 N = N->getOperand(0).getNode();
3822 if (!ISD::isNON_EXTLoad(N))
3825 *LD = cast<LoadSDNode>(N);
3829 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
3830 /// match movlp{s|d}. The lower half elements should come from lower half of
3831 /// V1 (and in order), and the upper half elements should come from the upper
3832 /// half of V2 (and in order). And since V1 will become the source of the
3833 /// MOVLP, it must be either a vector load or a scalar load to vector.
3834 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
3835 ShuffleVectorSDNode *Op) {
3836 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
3838 // If V2 is a vector load, don't do this transformation. We will try to fold
3839 // the load into a shufps op instead.
3840 if (ISD::isNON_EXTLoad(V2))
3843 unsigned NumElems = Op->getValueType(0).getVectorNumElements();
3845 if (NumElems != 2 && NumElems != 4)
3847 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
3848 if (!isUndefOrEqual(Op->getMaskElt(i), i))
3850 for (unsigned i = NumElems/2; i != NumElems; ++i)
3851 if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
3856 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
3858 static bool isSplatVector(SDNode *N) {
3859 if (N->getOpcode() != ISD::BUILD_VECTOR)
3862 SDValue SplatValue = N->getOperand(0);
3863 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
3864 if (N->getOperand(i) != SplatValue)
3869 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
3870 /// to a zero vector.
3871 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
3872 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
3873 SDValue V1 = N->getOperand(0);
3874 SDValue V2 = N->getOperand(1);
3875 unsigned NumElems = N->getValueType(0).getVectorNumElements();
3876 for (unsigned i = 0; i != NumElems; ++i) {
3877 int Idx = N->getMaskElt(i);
3878 if (Idx >= (int)NumElems) {
3879 unsigned Opc = V2.getOpcode();
3880 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
3882 if (Opc != ISD::BUILD_VECTOR ||
3883 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
3885 } else if (Idx >= 0) {
3886 unsigned Opc = V1.getOpcode();
3887 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
3889 if (Opc != ISD::BUILD_VECTOR ||
3890 !X86::isZeroNode(V1.getOperand(Idx)))
3897 /// getZeroVector - Returns a vector of specified type with all zero elements.
3899 static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
3901 assert(VT.isVector() && "Expected a vector type");
3903 // Always build SSE zero vectors as <4 x i32> bitcasted
3904 // to their dest type. This ensures they get CSE'd.
3906 if (VT.getSizeInBits() == 128) { // SSE
3907 if (HasSSE2) { // SSE2
3908 SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
3909 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
3911 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
3912 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
3914 } else if (VT.getSizeInBits() == 256) { // AVX
3915 // 256-bit logic and arithmetic instructions in AVX are
3916 // all floating-point; there is no support for integer ops. Default
3917 // to emitting FP zero vectors in that case.
3918 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
3919 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
3920 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
3922 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
3925 /// getOnesVector - Returns a vector of specified type with all bits set.
3926 /// Always build ones vectors as <4 x i32>. For 256-bit types, use two
3927 /// <4 x i32> inserted in a <8 x i32> appropriately. Then bitcast to their
3928 /// original type, ensuring they get CSE'd.
3929 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
3930 assert(VT.isVector() && "Expected a vector type");
3931 assert((VT.is128BitVector() || VT.is256BitVector())
3932 && "Expected a 128-bit or 256-bit vector type");
3934 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
3935 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
3936 Cst, Cst, Cst, Cst);
3938 if (VT.is256BitVector()) {
3939 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32),
3940 Vec, DAG.getConstant(0, MVT::i32), DAG, dl);
3941 Vec = Insert128BitVector(InsV, Vec,
3942 DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl);
3945 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
3948 /// NormalizeMask - V2 is a splat; modify the mask (if needed) so all elements
3949 /// that point to V2 point to its first element.
3950 static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
3951 EVT VT = SVOp->getValueType(0);
3952 unsigned NumElems = VT.getVectorNumElements();
3954 bool Changed = false;
3955 SmallVector<int, 8> MaskVec;
3956 SVOp->getMask(MaskVec);
3958 for (unsigned i = 0; i != NumElems; ++i) {
3959 if (MaskVec[i] > (int)NumElems) {
3960 MaskVec[i] = NumElems;
3965 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0),
3966 SVOp->getOperand(1), &MaskVec[0]);
3967 return SDValue(SVOp, 0);
3970 /// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
3971 /// operation of specified width.
3972 static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
3974 unsigned NumElems = VT.getVectorNumElements();
3975 SmallVector<int, 8> Mask;
3976 Mask.push_back(NumElems);
3977 for (unsigned i = 1; i != NumElems; ++i)
3979 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
3982 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
3983 static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
3985 unsigned NumElems = VT.getVectorNumElements();
3986 SmallVector<int, 8> Mask;
3987 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
3989 Mask.push_back(i + NumElems);
3991 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
3994 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
3995 static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
3997 unsigned NumElems = VT.getVectorNumElements();
3998 unsigned Half = NumElems/2;
3999 SmallVector<int, 8> Mask;
4000 for (unsigned i = 0; i != Half; ++i) {
4001 Mask.push_back(i + Half);
4002 Mask.push_back(i + NumElems + Half);
4004 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4007 // PromoteSplatv8v16 - All i16 and i8 vector types can't be used directly by
4008 // a generic shuffle instruction because the target has no such instructions.
4009 // Generate shuffles which repeat i16 and i8 several times until they can be
4010 // represented by v4f32 and then be manipulated by target supported shuffles.
4011 static SDValue PromoteSplatv8v16(SDValue V, SelectionDAG &DAG, int &EltNo) {
4012 EVT VT = V.getValueType();
4013 int NumElems = VT.getVectorNumElements();
4014 DebugLoc dl = V.getDebugLoc();
4016 while (NumElems > 4) {
4017 if (EltNo < NumElems/2) {
4018 V = getUnpackl(DAG, dl, VT, V, V);
4020 V = getUnpackh(DAG, dl, VT, V, V);
4021 EltNo -= NumElems/2;
4028 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
4029 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
4030 EVT VT = V.getValueType();
4031 DebugLoc dl = V.getDebugLoc();
4032 assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256)
4033 && "Vector size not supported");
4035 bool Is128 = VT.getSizeInBits() == 128;
4036 EVT NVT = Is128 ? MVT::v4f32 : MVT::v8f32;
4037 V = DAG.getNode(ISD::BITCAST, dl, NVT, V);
4040 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
4041 V = DAG.getVectorShuffle(NVT, dl, V, DAG.getUNDEF(NVT), &SplatMask[0]);
4043 // The second half of indices refers to the higher part, which is a
4044 // duplication of the lower one. This makes this shuffle a perfect match
4045 // for the VPERM instruction.
4046 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
4047 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
4048 V = DAG.getVectorShuffle(NVT, dl, V, DAG.getUNDEF(NVT), &SplatMask[0]);
4051 return DAG.getNode(ISD::BITCAST, dl, VT, V);
4054 /// PromoteVectorToScalarSplat - Since there's no native support for
4055 /// scalar_to_vector for 256-bit AVX, a 128-bit scalar_to_vector +
4056 /// INSERT_SUBVECTOR is generated. Recognize this idiom and do the
4057 /// shuffle before the insertion; this yields fewer instructions in the end.
4058 static SDValue PromoteVectorToScalarSplat(ShuffleVectorSDNode *SV,
4059 SelectionDAG &DAG) {
4060 EVT SrcVT = SV->getValueType(0);
4061 SDValue V1 = SV->getOperand(0);
4062 DebugLoc dl = SV->getDebugLoc();
4063 int NumElems = SrcVT.getVectorNumElements();
4065 assert(SrcVT.is256BitVector() && "unknown how to handle vector type");
4067 SmallVector<int, 4> Mask;
4068 for (int i = 0; i < NumElems/2; ++i)
4069 Mask.push_back(SV->getMaskElt(i));
4071 EVT SVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(),
4073 SDValue SV1 = DAG.getVectorShuffle(SVT, dl, V1.getOperand(1),
4074 DAG.getUNDEF(SVT), &Mask[0]);
4075 SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), SV1,
4076 DAG.getConstant(0, MVT::i32), DAG, dl);
4078 return Insert128BitVector(InsV, SV1,
4079 DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
4082 /// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32 and
4083 /// v8i32, v16i16 or v32i8 to v8f32.
4084 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
4085 EVT SrcVT = SV->getValueType(0);
4086 SDValue V1 = SV->getOperand(0);
4087 DebugLoc dl = SV->getDebugLoc();
4089 int EltNo = SV->getSplatIndex();
4090 int NumElems = SrcVT.getVectorNumElements();
4091 unsigned Size = SrcVT.getSizeInBits();
4093 // Extract the 128-bit part containing the splat element and update
4094 // the splat element index when it refers to the higher register.
4096 unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0;
4097 V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl);
4099 EltNo -= NumElems/2;
4102 // Make this 128-bit vector duplicate i8 and i16 elements
4104 V1 = PromoteSplatv8v16(V1, DAG, EltNo);
4106 // Recreate the 256-bit vector and place the same 128-bit vector
4107 // into the low and high part. This is necessary because we want
4108 // to use VPERM to shuffle the v8f32 vector, and VPERM only shuffles
4109 // inside each separate v4f32 lane.
4111 SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1,
4112 DAG.getConstant(0, MVT::i32), DAG, dl);
4113 V1 = Insert128BitVector(InsV, V1,
4114 DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
4117 return getLegalSplat(DAG, V1, EltNo);
4120 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
4121 /// vector and a zero or undef vector. This produces a shuffle where the low
4122 /// element of V2 is swizzled into the zero/undef vector, landing at element
4123 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4124 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
4125 bool isZero, bool HasSSE2,
4126 SelectionDAG &DAG) {
4127 EVT VT = V2.getValueType();
4129 ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
4130 unsigned NumElems = VT.getVectorNumElements();
4131 SmallVector<int, 16> MaskVec;
4132 for (unsigned i = 0; i != NumElems; ++i)
4133 // If this is the insertion idx, put the low elt of V2 here.
4134 MaskVec.push_back(i == Idx ? NumElems : i);
4135 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
4138 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
4139 /// element of the result of the vector shuffle.
4140 static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
4143 return SDValue(); // Limit search depth.
4145 SDValue V = SDValue(N, 0);
4146 EVT VT = V.getValueType();
4147 unsigned Opcode = V.getOpcode();
4149 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
4150 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
4151 Index = SV->getMaskElt(Index);
4154 return DAG.getUNDEF(VT.getVectorElementType());
4156 int NumElems = VT.getVectorNumElements();
4157 SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1);
4158 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1);
4161 // Recurse into target specific vector shuffles to find scalars.
4162 if (isTargetShuffle(Opcode)) {
4163 int NumElems = VT.getVectorNumElements();
4164 SmallVector<unsigned, 16> ShuffleMask;
4168 case X86ISD::SHUFPS:
4169 case X86ISD::SHUFPD:
4170 ImmN = N->getOperand(N->getNumOperands()-1);
4171 DecodeSHUFPSMask(NumElems,
4172 cast<ConstantSDNode>(ImmN)->getZExtValue(),
4175 case X86ISD::PUNPCKHBW:
4176 case X86ISD::PUNPCKHWD:
4177 case X86ISD::PUNPCKHDQ:
4178 case X86ISD::PUNPCKHQDQ:
4179 DecodePUNPCKHMask(NumElems, ShuffleMask);
4181 case X86ISD::UNPCKHPS:
4182 case X86ISD::UNPCKHPD:
4183 case X86ISD::VUNPCKHPSY:
4184 case X86ISD::VUNPCKHPDY:
4185 DecodeUNPCKHPMask(NumElems, ShuffleMask);
4187 case X86ISD::PUNPCKLBW:
4188 case X86ISD::PUNPCKLWD:
4189 case X86ISD::PUNPCKLDQ:
4190 case X86ISD::PUNPCKLQDQ:
4191 DecodePUNPCKLMask(VT, ShuffleMask);
4193 case X86ISD::UNPCKLPS:
4194 case X86ISD::UNPCKLPD:
4195 case X86ISD::VUNPCKLPSY:
4196 case X86ISD::VUNPCKLPDY:
4197 DecodeUNPCKLPMask(VT, ShuffleMask);
4199 case X86ISD::MOVHLPS:
4200 DecodeMOVHLPSMask(NumElems, ShuffleMask);
4202 case X86ISD::MOVLHPS:
4203 DecodeMOVLHPSMask(NumElems, ShuffleMask);
4205 case X86ISD::PSHUFD:
4206 ImmN = N->getOperand(N->getNumOperands()-1);
4207 DecodePSHUFMask(NumElems,
4208 cast<ConstantSDNode>(ImmN)->getZExtValue(),
4211 case X86ISD::PSHUFHW:
4212 ImmN = N->getOperand(N->getNumOperands()-1);
4213 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
4216 case X86ISD::PSHUFLW:
4217 ImmN = N->getOperand(N->getNumOperands()-1);
4218 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
4222 case X86ISD::MOVSD: {
4223 // The index 0 always comes from the first element of the second source,
4224 // this is why MOVSS and MOVSD are used in the first place. The other
4225 // elements come from the other positions of the first source vector.
4226 unsigned OpNum = (Index == 0) ? 1 : 0;
4227 return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG,
4230 case X86ISD::VPERMILPS:
4231 case X86ISD::VPERMILPSY:
4232 // FIXME: Implement the other types
4233 ImmN = N->getOperand(N->getNumOperands()-1);
4234 DecodeVPERMILMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(),
4237 assert("not implemented for target shuffle node");
4241 Index = ShuffleMask[Index];
4243 return DAG.getUNDEF(VT.getVectorElementType());
4245 SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
4246 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG,
4250 // Actual nodes that may contain scalar elements
4251 if (Opcode == ISD::BITCAST) {
4252 V = V.getOperand(0);
4253 EVT SrcVT = V.getValueType();
4254 unsigned NumElems = VT.getVectorNumElements();
4256 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4260 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4261 return (Index == 0) ? V.getOperand(0)
4262 : DAG.getUNDEF(VT.getVectorElementType());
4264 if (V.getOpcode() == ISD::BUILD_VECTOR)
4265 return V.getOperand(Index);
4270 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
4271 /// shuffle operation which come consecutively from a zero vector. The
4272 /// search can start in two different directions, from left or right.
4274 unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,
4275 bool ZerosFromLeft, SelectionDAG &DAG) {
4278 while (i < NumElems) {
4279 unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
4280 SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0);
4281 if (!(Elt.getNode() &&
4282 (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
4290 /// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to
4291 /// MaskE correspond consecutively to elements from one of the vector operands,
4292 /// starting from its index OpIdx. Also tell OpNum which source vector operand.
4294 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE,
4295 int OpIdx, int NumElems, unsigned &OpNum) {
4296 bool SeenV1 = false;
4297 bool SeenV2 = false;
4299 for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) {
4300 int Idx = SVOp->getMaskElt(i);
4301 // Ignore undef indices
4310 // Only accept consecutive elements from the same vector
4311 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
4315 OpNum = SeenV1 ? 0 : 1;
4319 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
4320 /// logical right shift of a vector.
4321 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4322 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4323 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4324 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4325 false /* check zeros from right */, DAG);
4331 // Considering the elements in the mask that are not consecutive zeros,
4332 // check if they consecutively come from only one of the source vectors.
4334 // V1 = {X, A, B, C} 0
4336 // vector_shuffle V1, V2 <1, 2, 3, X>
4338 if (!isShuffleMaskConsecutive(SVOp,
4339 0, // Mask Start Index
4340 NumElems-NumZeros-1, // Mask End Index
4341 NumZeros, // Where to start looking in the src vector
4342 NumElems, // Number of elements in vector
4343 OpSrc)) // Which source operand ?
4348 ShVal = SVOp->getOperand(OpSrc);
4352 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
4353 /// logical left shift of a vector.
4354 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4355 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4356 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4357 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4358 true /* check zeros from left */, DAG);
4364 // Considering the elements in the mask that are not consecutive zeros,
4365 // check if they consecutively come from only one of the source vectors.
4367 // 0 { A, B, X, X } = V2
4369 // vector_shuffle V1, V2 <X, X, 4, 5>
4371 if (!isShuffleMaskConsecutive(SVOp,
4372 NumZeros, // Mask Start Index
4373 NumElems-1, // Mask End Index
4374 0, // Where to start looking in the src vector
4375 NumElems, // Number of elements in vector
4376 OpSrc)) // Which source operand ?
4381 ShVal = SVOp->getOperand(OpSrc);
4385 /// isVectorShift - Returns true if the shuffle can be implemented as a
4386 /// logical left or right shift of a vector.
4387 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4388 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4389 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
4390 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
4396 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
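/// Adjacent pairs of i8 elements are packed into i16 values (the odd element
/// zero-extended, shifted left by 8, and OR'd with the even one), inserted
/// into a v8i16, and the result is bitcast back to v16i8 at the end.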
4398 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
4399 unsigned NumNonZero, unsigned NumZero,
4401 const TargetLowering &TLI) {
4405 DebugLoc dl = Op.getDebugLoc();
4408 for (unsigned i = 0; i < 16; ++i) {
4409 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
4410 if (ThisIsNonZero && First) {
4412 V = getZeroVector(MVT::v8i16, true, DAG, dl);
4414 V = DAG.getUNDEF(MVT::v8i16);
4419 SDValue ThisElt(0, 0), LastElt(0, 0);
4420 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
4421 if (LastIsNonZero) {
4422 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
4423 MVT::i16, Op.getOperand(i-1));
4425 if (ThisIsNonZero) {
4426 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
4427 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
4428 ThisElt, DAG.getConstant(8, MVT::i8));
4430 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
4434 if (ThisElt.getNode())
4435 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
4436 DAG.getIntPtrConstant(i/2));
4440 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
4443 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
4445 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
4446 unsigned NumNonZero, unsigned NumZero,
4448 const TargetLowering &TLI) {
4452 DebugLoc dl = Op.getDebugLoc();
4455 for (unsigned i = 0; i < 8; ++i) {
4456 bool isNonZero = (NonZeros & (1 << i)) != 0;
4460 V = getZeroVector(MVT::v8i16, true, DAG, dl);
4462 V = DAG.getUNDEF(MVT::v8i16);
4465 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
4466 MVT::v8i16, V, Op.getOperand(i),
4467 DAG.getIntPtrConstant(i));
4474 /// getVShift - Return a vector logical shift node.
4476 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
4477 unsigned NumBits, SelectionDAG &DAG,
4478 const TargetLowering &TLI, DebugLoc dl) {
4479 EVT ShVT = MVT::v2i64;
4480 unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
4481 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
4482 return DAG.getNode(ISD::BITCAST, dl, VT,
4483 DAG.getNode(Opc, dl, ShVT, SrcOp,
4484 DAG.getConstant(NumBits,
4485 TLI.getShiftAmountTy(SrcOp.getValueType()))));
4489 X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
4490 SelectionDAG &DAG) const {
4492 // Check if the scalar load can be widened into a vector load. And if
4493 // the address is "base + cst" see if the cst can be "absorbed" into
4494 // the shuffle mask.
4495 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
4496 SDValue Ptr = LD->getBasePtr();
4497 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
4499 EVT PVT = LD->getValueType(0);
4500 if (PVT != MVT::i32 && PVT != MVT::f32)
4505 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
4506 FI = FINode->getIndex();
4508 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
4509 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
4510 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4511 Offset = Ptr.getConstantOperandVal(1);
4512 Ptr = Ptr.getOperand(0);
4517 SDValue Chain = LD->getChain();
4518 // Make sure the stack object alignment is at least 16.
4519 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
4520 if (DAG.InferPtrAlignment(Ptr) < 16) {
4521 if (MFI->isFixedObjectIndex(FI)) {
4522 // Can't change the alignment. FIXME: It's possible to compute
4523 // the exact stack offset and reference FI + adjust offset instead.
4524 // If someone *really* cares about this. That's the way to implement it.
4527 MFI->setObjectAlignment(FI, 16);
4531 // (Offset % 16) must be a multiple of 4. The address is then
4532 // Ptr + (Offset & ~15).
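// For example, a 4-byte element at Offset 20 gives StartOffset 16 and
// EltNo 1, so the splat becomes shuffle <1,1,1,1> of the wide load at Ptr+16.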
4535 if ((Offset % 16) & 3)
4537 int64_t StartOffset = Offset & ~15;
4539 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(),
4540 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
4542 int EltNo = (Offset - StartOffset) >> 2;
4543 int Mask[4] = { EltNo, EltNo, EltNo, EltNo };
4544 EVT VT = (PVT == MVT::i32) ? MVT::v4i32 : MVT::v4f32;
4545 SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr,
4546 LD->getPointerInfo().getWithOffset(StartOffset),
4548 // Canonicalize it to a v4i32 shuffle.
4549 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
4550 return DAG.getNode(ISD::BITCAST, dl, VT,
4551 DAG.getVectorShuffle(MVT::v4i32, dl, V1,
4552 DAG.getUNDEF(MVT::v4i32),&Mask[0]));
4558 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
4559 /// vector of type 'VT', see if the elements can be replaced by a single large
4560 /// load which has the same value as a build_vector whose operands are 'elts'.
4562 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
4564 /// FIXME: we'd also like to handle the case where the last elements are zero
4565 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
4566 /// There's even a handy isZeroNode for that purpose.
4567 static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
4568 DebugLoc &DL, SelectionDAG &DAG) {
4569 EVT EltVT = VT.getVectorElementType();
4570 unsigned NumElems = Elts.size();
4572 LoadSDNode *LDBase = NULL;
4573 unsigned LastLoadedElt = -1U;
4575 // For each element in the initializer, see if we've found a load or an undef.
4576 // If we don't find an initial load element, or later load elements are
4577 // non-consecutive, bail out.
4578 for (unsigned i = 0; i < NumElems; ++i) {
4579 SDValue Elt = Elts[i];
4581 if (!Elt.getNode() ||
4582 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
4585 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
4587 LDBase = cast<LoadSDNode>(Elt.getNode());
4591 if (Elt.getOpcode() == ISD::UNDEF)
4594 LoadSDNode *LD = cast<LoadSDNode>(Elt);
4595 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
4600 // If we have found an entire vector of loads and undefs, then return a large
4601 // load of the entire vector width starting at the base pointer. If we found
4602 // consecutive loads for the low half, generate a vzext_load node.
4603 if (LastLoadedElt == NumElems - 1) {
4604 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
4605 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
4606 LDBase->getPointerInfo(),
4607 LDBase->isVolatile(), LDBase->isNonTemporal(), 0);
4608 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
4609 LDBase->getPointerInfo(),
4610 LDBase->isVolatile(), LDBase->isNonTemporal(),
4611 LDBase->getAlignment());
4612 } else if (NumElems == 4 && LastLoadedElt == 1 &&
4613 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
4614 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
4615 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
4616 SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys,
4618 LDBase->getMemOperand());
4619 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
4625 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
4626 DebugLoc dl = Op.getDebugLoc();
4628 EVT VT = Op.getValueType();
4629 EVT ExtVT = VT.getVectorElementType();
4630 unsigned NumElems = Op.getNumOperands();
4633 // - pxor (SSE2), xorps (SSE1), vpxor (128 AVX), xorp[s|d] (256 AVX)
4635 // - pcmpeqd (SSE2 and 128 AVX), fallback to constant pools (256 AVX)
4636 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
4637 ISD::isBuildVectorAllOnes(Op.getNode())) {
4638 // Canonicalize this to <4 x i32> (SSE) or <8 x i32> (AVX) to
4639 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
4640 // eliminated on x86-32 hosts.
4641 if (Op.getValueType() == MVT::v4i32 ||
4642 Op.getValueType() == MVT::v8i32)
4645 if (ISD::isBuildVectorAllOnes(Op.getNode()))
4646 return getOnesVector(Op.getValueType(), DAG, dl);
4647 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
4650 unsigned EVTBits = ExtVT.getSizeInBits();
4652 unsigned NumZero = 0;
4653 unsigned NumNonZero = 0;
4654 unsigned NonZeros = 0;
4655 bool IsAllConstants = true;
4656 SmallSet<SDValue, 8> Values;
4657 for (unsigned i = 0; i < NumElems; ++i) {
4658 SDValue Elt = Op.getOperand(i);
4659 if (Elt.getOpcode() == ISD::UNDEF)
4662 if (Elt.getOpcode() != ISD::Constant &&
4663 Elt.getOpcode() != ISD::ConstantFP)
4664 IsAllConstants = false;
4665 if (X86::isZeroNode(Elt))
4668 NonZeros |= (1 << i);
4673 // All undef vector. Return an UNDEF. All zero vectors were handled above.
4674 if (NumNonZero == 0)
4675 return DAG.getUNDEF(VT);
4677 // Special case for single non-zero, non-undef, element.
4678 if (NumNonZero == 1) {
4679 unsigned Idx = CountTrailingZeros_32(NonZeros);
4680 SDValue Item = Op.getOperand(Idx);
4682 // If this is an insertion of an i64 value on x86-32, and if the top bits of
4683 // the value are obviously zero, truncate the value to i32 and do the
4684 // insertion that way. Only do this if the value is non-constant or if the
4685 // value is a constant being inserted into element 0. It is cheaper to do
4686 // a constant pool load than it is to do a movd + shuffle.
4687 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
4688 (!IsAllConstants || Idx == 0)) {
4689 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
4691 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
4692 EVT VecVT = MVT::v4i32;
4693 unsigned VecElts = 4;
4695 // Truncate the value (which may itself be a constant) to i32, and
4696 // convert it to a vector with movd (S2V+shuffle to zero extend).
4697 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
4698 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
4699 Item = getShuffleVectorZeroOrUndef(Item, 0, true,
4700 Subtarget->hasSSE2(), DAG);
4702 // Now we have our 32-bit value zero extended in the low element of
4703 // a vector. If Idx != 0, swizzle it into place.
4705 SmallVector<int, 4> Mask;
4706 Mask.push_back(Idx);
4707 for (unsigned i = 1; i != VecElts; ++i)
4709 Item = DAG.getVectorShuffle(VecVT, dl, Item,
4710 DAG.getUNDEF(Item.getValueType()),
4713 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
4717 // If we have a constant or non-constant insertion into the low element of
4718 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
4719 // the rest of the elements. This will be matched as movd/movq/movss/movsd
4720 // depending on what the source datatype is.
4723 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
4724 } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
4725 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
4726 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
4727 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
4728 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(),
4730 } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
4731 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
4732 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
4733 EVT MiddleVT = MVT::v4i32;
4734 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
4735 Item = getShuffleVectorZeroOrUndef(Item, 0, true,
4736 Subtarget->hasSSE2(), DAG);
4737 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
4741 // Is it a vector logical left shift?
4742 if (NumElems == 2 && Idx == 1 &&
4743 X86::isZeroNode(Op.getOperand(0)) &&
4744 !X86::isZeroNode(Op.getOperand(1))) {
4745 unsigned NumBits = VT.getSizeInBits();
4746 return getVShift(true, VT,
4747 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
4748 VT, Op.getOperand(1)),
4749 NumBits/2, DAG, *this, dl);
4752 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
4755 // Otherwise, if this is a vector with i32 or f32 elements, and the element
4756 // is a non-constant being inserted into an element other than the low one,
4757 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
4758 // movd/movss) to move this into the low element, then shuffle it into
4760 if (EVTBits == 32) {
4761 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
4763 // Turn it into a shuffle of zero and zero-extended scalar to vector.
4764 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
4765 Subtarget->hasSSE2(), DAG);
4766 SmallVector<int, 8> MaskVec;
4767 for (unsigned i = 0; i < NumElems; i++)
4768 MaskVec.push_back(i == Idx ? 0 : 1);
4769 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
4773 // Splat is obviously ok. Let legalizer expand it to a shuffle.
4774 if (Values.size() == 1) {
4775 if (EVTBits == 32) {
4776 // Instead of a shuffle like this:
4777 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
4778 // Check if it's possible to issue this instead.
4779 // shuffle (vload ptr), undef, <1, 1, 1, 1>
4780 unsigned Idx = CountTrailingZeros_32(NonZeros);
4781 SDValue Item = Op.getOperand(Idx);
4782 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
4783 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
4788 // A vector full of immediates; various special cases are already
4789 // handled, so this is best done with a single constant-pool load.
4793 // For AVX-length vectors, build the individual 128-bit pieces and use
4794 // shuffles to put them in place.
4795 if (VT.getSizeInBits() == 256 && !ISD::isBuildVectorAllZeros(Op.getNode())) {
4796 SmallVector<SDValue, 32> V;
4797 for (unsigned i = 0; i < NumElems; ++i)
4798 V.push_back(Op.getOperand(i));
4800 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
4802 // Build both the lower and upper subvector.
4803 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2);
4804 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2],
4807 // Recreate the wider vector with the lower and upper part.
4808 SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower,
4809 DAG.getConstant(0, MVT::i32), DAG, dl);
4810 return Insert128BitVector(Vec, Upper, DAG.getConstant(NumElems/2, MVT::i32),
4814 // Let legalizer expand 2-wide build_vectors.
4815 if (EVTBits == 64) {
4816 if (NumNonZero == 1) {
4817 // One half is zero or undef.
4818 unsigned Idx = CountTrailingZeros_32(NonZeros);
4819 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
4820 Op.getOperand(Idx));
4821 return getShuffleVectorZeroOrUndef(V2, Idx, true,
4822 Subtarget->hasSSE2(), DAG);
4827 // If element VT is < 32 bits, convert it to inserts into a zero vector.
4828 if (EVTBits == 8 && NumElems == 16) {
4829 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
4831 if (V.getNode()) return V;
4834 if (EVTBits == 16 && NumElems == 8) {
4835 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
4837 if (V.getNode()) return V;
4840 // If element VT is 32 bits, turn it into a number of shuffles.
4841 SmallVector<SDValue, 8> V;
4843 if (NumElems == 4 && NumZero > 0) {
4844 for (unsigned i = 0; i < 4; ++i) {
4845 bool isZero = !(NonZeros & (1 << i));
4847 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
4849 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
4852 for (unsigned i = 0; i < 2; ++i) {
4853 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
4856 V[i] = V[i*2]; // Must be a zero vector.
4859 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
4862 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
4865 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
4870 SmallVector<int, 8> MaskVec;
4871 bool Reverse = (NonZeros & 0x3) == 2;
4872 for (unsigned i = 0; i < 2; ++i)
4873 MaskVec.push_back(Reverse ? 1-i : i);
4874 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
4875 for (unsigned i = 0; i < 2; ++i)
4876 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems);
4877 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
4880 if (Values.size() > 1 && VT.getSizeInBits() == 128) {
4881 // Check for a build vector of consecutive loads.
4882 for (unsigned i = 0; i < NumElems; ++i)
4883 V[i] = Op.getOperand(i);
4885 // Check for elements which are consecutive loads.
4886 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
4890 // For SSE 4.1, use insertps to insert the remaining elements one at a time.
4891 if (getSubtarget()->hasSSE41()) {
4893 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
4894 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
4896 Result = DAG.getUNDEF(VT);
4898 for (unsigned i = 1; i < NumElems; ++i) {
4899 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
4900 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
4901 Op.getOperand(i), DAG.getIntPtrConstant(i));
4906 // Otherwise, expand into a number of unpckl*, start by extending each of
4907 // our (non-undef) elements to the full vector width with the element in the
4908 // bottom slot of the vector (which generates no code for SSE).
4909 for (unsigned i = 0; i < NumElems; ++i) {
4910 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
4911 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
4913 V[i] = DAG.getUNDEF(VT);
4916 // Next, we iteratively mix elements, e.g. for v4f32:
4917 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
4918 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
4919 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
4920 unsigned EltStride = NumElems >> 1;
4921 while (EltStride != 0) {
4922 for (unsigned i = 0; i < EltStride; ++i) {
4923 // If V[i+EltStride] is undef and this is the first round of mixing,
4924 // then it is safe to just drop this shuffle: V[i] is already in the
4925 // right place, the one element (since it's the first round) being
4926 // inserted as undef can be dropped. This isn't safe for successive
4927 // rounds because they will permute elements within both vectors.
4928 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
4929 EltStride == NumElems/2)
4932 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
4942 X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
4943 // We support concatenating two MMX registers and placing them in an MMX
4944 // register. This is better than doing a stack conversion.
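// As an informal illustration of the lowering below: each operand is viewed as a
// v1i64, moved into an XMM register with MOVQ2DQ, and the two halves are then
// combined with a v2i64 shuffle <0, 2>, avoiding a trip through the stack.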
4945 DebugLoc dl = Op.getDebugLoc();
4946 EVT ResVT = Op.getValueType();
4947 assert(Op.getNumOperands() == 2);
4948 assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
4949 ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
4951 SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0));
4952 SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
4953 InVec = Op.getOperand(1);
4954 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4955 unsigned NumElts = ResVT.getVectorNumElements();
4956 VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
4957 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
4958 InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
4960 InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
4961 SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
4962 Mask[0] = 0; Mask[1] = 2;
4963 VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
4965 return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
4968 // v8i16 shuffles - Prefer shuffles in the following order:
4969 // 1. [all] pshuflw, pshufhw, optional move
4970 // 2. [ssse3] 1 x pshufb
4971 // 3. [ssse3] 2 x pshufb + 1 x por
4972 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
4974 X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
4975 SelectionDAG &DAG) const {
4976 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
4977 SDValue V1 = SVOp->getOperand(0);
4978 SDValue V2 = SVOp->getOperand(1);
4979 DebugLoc dl = SVOp->getDebugLoc();
4980 SmallVector<int, 8> MaskVals;
4982 // Determine if more than 1 of the words in each of the low and high quadwords
4983 // of the result come from the same quadword of one of the two inputs. Undef
4984 // mask values count as coming from any quadword, for better codegen.
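// For example (illustrative only): with mask <0,1,2,3,8,9,10,11> every low-result
// word comes from quad 0 of V1 and every high-result word from quad 0 of V2
// (mask indices 8-11, i.e. quad 2 in this numbering), so the scoring below picks
// BestLoQuad = 0 and BestHiQuad = 2.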
4985 SmallVector<unsigned, 4> LoQuad(4);
4986 SmallVector<unsigned, 4> HiQuad(4);
4987 BitVector InputQuads(4);
4988 for (unsigned i = 0; i < 8; ++i) {
4989 SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad;
4990 int EltIdx = SVOp->getMaskElt(i);
4991 MaskVals.push_back(EltIdx);
5000 InputQuads.set(EltIdx / 4);
5003 int BestLoQuad = -1;
5004 unsigned MaxQuad = 1;
5005 for (unsigned i = 0; i < 4; ++i) {
5006 if (LoQuad[i] > MaxQuad) {
5008 MaxQuad = LoQuad[i];
5012 int BestHiQuad = -1;
5014 for (unsigned i = 0; i < 4; ++i) {
5015 if (HiQuad[i] > MaxQuad) {
5017 MaxQuad = HiQuad[i];
5021 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
5022 // of the two input vectors, shuffle them into one input vector so only a
5023 // single pshufb instruction is necessary. If there are more than 2 input
5024 // quads, disable the next transformation since it does not help SSSE3.
5025 bool V1Used = InputQuads[0] || InputQuads[1];
5026 bool V2Used = InputQuads[2] || InputQuads[3];
5027 if (Subtarget->hasSSSE3()) {
5028 if (InputQuads.count() == 2 && V1Used && V2Used) {
5029 BestLoQuad = InputQuads.find_first();
5030 BestHiQuad = InputQuads.find_next(BestLoQuad);
5032 if (InputQuads.count() > 2) {
5038 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
5039 // the shuffle mask. If a quad is scored as -1, that means that it contains
5040 // words from all 4 input quadwords.
5042 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
5043 SmallVector<int, 8> MaskV;
5044 MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
5045 MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
5046 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
5047 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
5048 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
5049 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
5051 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
5052 // source words for the shuffle, to aid later transformations.
5053 bool AllWordsInNewV = true;
5054 bool InOrder[2] = { true, true };
5055 for (unsigned i = 0; i != 8; ++i) {
5056 int idx = MaskVals[i];
5058 InOrder[i/4] = false;
5059 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
5061 AllWordsInNewV = false;
5065 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
5066 if (AllWordsInNewV) {
5067 for (int i = 0; i != 8; ++i) {
5068 int idx = MaskVals[i];
5071 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
5072 if ((idx != i) && idx < 4)
5074 if ((idx != i) && idx > 3)
5083 // If we've eliminated the use of V2, and the new mask is a pshuflw or
5084 // pshufhw, that's as cheap as it gets. Return the new shuffle.
5085 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
5086 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
5087 unsigned TargetMask = 0;
5088 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
5089 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
5090 TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()):
5091 X86::getShufflePSHUFLWImmediate(NewV.getNode());
5092 V1 = NewV.getOperand(0);
5093 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
5097 // If we have SSSE3, and all words of the result are from 1 input vector,
5098 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
5099 // is present, fall back to case 4.
5100 if (Subtarget->hasSSSE3()) {
5101 SmallVector<SDValue,16> pshufbMask;
5103 // If we have elements from both input vectors, set the high bit of the
5104 // shuffle mask element to zero out elements that come from V2 in the V1
5105 // mask, and elements that come from V1 in the V2 mask, so that the two
5106 // results can be OR'd together.
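// Illustration (not exhaustive): if MaskVals[i] is 9, a V2 word, the V1 mask gets
// 0x80,0x80 for that word (zeroing it), while the V2 mask built further down gets
// byte indices 2 and 3 (18-16 and 18-15), so the final OR keeps exactly one
// contribution per result byte.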
5107 bool TwoInputs = V1Used && V2Used;
5108 for (unsigned i = 0; i != 8; ++i) {
5109 int EltIdx = MaskVals[i] * 2;
5110 if (TwoInputs && (EltIdx >= 16)) {
5111 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5112 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5115 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
5116 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
5118 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
5119 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
5120 DAG.getNode(ISD::BUILD_VECTOR, dl,
5121 MVT::v16i8, &pshufbMask[0], 16));
5123 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
5125 // Calculate the shuffle mask for the second input, shuffle it, and
5126 // OR it with the first shuffled input.
5128 for (unsigned i = 0; i != 8; ++i) {
5129 int EltIdx = MaskVals[i] * 2;
5131 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5132 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5135 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
5136 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
5138 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
5139 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
5140 DAG.getNode(ISD::BUILD_VECTOR, dl,
5141 MVT::v16i8, &pshufbMask[0], 16));
5142 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
5143 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
5146 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
5147 // and update MaskVals with new element order.
5148 BitVector InOrder(8);
5149 if (BestLoQuad >= 0) {
5150 SmallVector<int, 8> MaskV;
5151 for (int i = 0; i != 4; ++i) {
5152 int idx = MaskVals[i];
5154 MaskV.push_back(-1);
5156 } else if ((idx / 4) == BestLoQuad) {
5157 MaskV.push_back(idx & 3);
5160 MaskV.push_back(-1);
5163 for (unsigned i = 4; i != 8; ++i)
5165 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
5168 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
5169 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
5171 X86::getShufflePSHUFLWImmediate(NewV.getNode()),
5175 // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
5176 // and update MaskVals with the new element order.
5177 if (BestHiQuad >= 0) {
5178 SmallVector<int, 8> MaskV;
5179 for (unsigned i = 0; i != 4; ++i)
5181 for (unsigned i = 4; i != 8; ++i) {
5182 int idx = MaskVals[i];
5184 MaskV.push_back(-1);
5186 } else if ((idx / 4) == BestHiQuad) {
5187 MaskV.push_back((idx & 3) + 4);
5190 MaskV.push_back(-1);
5193 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
5196 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
5197 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
5199 X86::getShufflePSHUFHWImmediate(NewV.getNode()),
5203 // In case BestHiQuad & BestLoQuad were both -1, which means each quadword has a word
5204 // from each of the four input quadwords, calculate the InOrder bitvector now
5205 // before falling through to the insert/extract cleanup.
5206 if (BestLoQuad == -1 && BestHiQuad == -1) {
5208 for (int i = 0; i != 8; ++i)
5209 if (MaskVals[i] < 0 || MaskVals[i] == i)
5213 // The other elements are put in the right place using pextrw and pinsrw.
5214 for (unsigned i = 0; i != 8; ++i) {
5217 int EltIdx = MaskVals[i];
5220 SDValue ExtOp = (EltIdx < 8)
5221 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
5222 DAG.getIntPtrConstant(EltIdx))
5223 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
5224 DAG.getIntPtrConstant(EltIdx - 8));
5225 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
5226 DAG.getIntPtrConstant(i));
5231 // v16i8 shuffles - Prefer shuffles in the following order:
5232 // 1. [ssse3] 1 x pshufb
5233 // 2. [ssse3] 2 x pshufb + 1 x por
5234 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
5236 SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
5238 const X86TargetLowering &TLI) {
5239 SDValue V1 = SVOp->getOperand(0);
5240 SDValue V2 = SVOp->getOperand(1);
5241 DebugLoc dl = SVOp->getDebugLoc();
5242 SmallVector<int, 16> MaskVals;
5243 SVOp->getMask(MaskVals);
5245 // If we have SSSE3, case 1 is generated when all result bytes come from
5246 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
5247 // present, fall back to case 3.
5248 // FIXME: kill V2Only once shuffles are canonicalized by getNode.
5251 for (unsigned i = 0; i < 16; ++i) {
5252 int EltIdx = MaskVals[i];
5261 // If SSSE3, use 1 pshufb instruction per input vector that has elements in the result.
5262 if (TLI.getSubtarget()->hasSSSE3()) {
5263 SmallVector<SDValue,16> pshufbMask;
5265 // If all result elements are from one input vector, then only translate
5266 // undef mask values to 0x80 (zero out result) in the pshufb mask.
5268 // Otherwise, we have elements from both input vectors, and must zero out
5269 // elements that come from V2 in the first mask, and V1 in the second mask
5270 // so that we can OR them together.
5271 bool TwoInputs = !(V1Only || V2Only);
5272 for (unsigned i = 0; i != 16; ++i) {
5273 int EltIdx = MaskVals[i];
5274 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
5275 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5278 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
5280 // If all the elements are from V2, assign it to V1 and return after
5281 // building the first pshufb.
5284 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
5285 DAG.getNode(ISD::BUILD_VECTOR, dl,
5286 MVT::v16i8, &pshufbMask[0], 16));
5290 // Calculate the shuffle mask for the second input, shuffle it, and
5291 // OR it with the first shuffled input.
5293 for (unsigned i = 0; i != 16; ++i) {
5294 int EltIdx = MaskVals[i];
5296 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
5299 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
5301 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
5302 DAG.getNode(ISD::BUILD_VECTOR, dl,
5303 MVT::v16i8, &pshufbMask[0], 16));
5304 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
5307 // No SSSE3 - Calculate the in-place words and then fix all out-of-place words
5308 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
5309 // the 16 different words that comprise the two doublequadword input vectors.
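// For example (illustrative only): if the mask bytes for result word i are <7,6>,
// both bytes live in word 3 of V1 but are swapped; the loop below extracts word 3
// twice, shifts one copy left by 8 and the other right by 8, ORs them, and
// reinserts the combined word with a pinsrw.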
5310 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
5311 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
5312 SDValue NewV = V2Only ? V2 : V1;
5313 for (int i = 0; i != 8; ++i) {
5314 int Elt0 = MaskVals[i*2];
5315 int Elt1 = MaskVals[i*2+1];
5317 // This word of the result is all undef, skip it.
5318 if (Elt0 < 0 && Elt1 < 0)
5321 // This word of the result is already in the correct place, skip it.
5322 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
5324 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
5327 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
5328 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
5331 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
5332 // together using a single extract, load the word and store it.
5333 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
5334 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
5335 DAG.getIntPtrConstant(Elt1 / 2));
5336 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
5337 DAG.getIntPtrConstant(i));
5341 // If Elt1 is defined, extract it from the appropriate source. If the
5342 // source byte is not also odd, shift the extracted word left 8 bits;
5343 // otherwise clear the bottom 8 bits if we need to do an OR.
5345 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
5346 DAG.getIntPtrConstant(Elt1 / 2));
5347 if ((Elt1 & 1) == 0)
5348 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
5350 TLI.getShiftAmountTy(InsElt.getValueType())));
5352 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
5353 DAG.getConstant(0xFF00, MVT::i16));
5355 // If Elt0 is defined, extract it from the appropriate source. If the
5356 // source byte is not also even, shift the extracted word right 8 bits. If
5357 // Elt1 was also defined, OR the extracted values together before
5358 // inserting them in the result.
5360 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
5361 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
5362 if ((Elt0 & 1) != 0)
5363 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
5365 TLI.getShiftAmountTy(InsElt0.getValueType())));
5367 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
5368 DAG.getConstant(0x00FF, MVT::i16));
5369 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
5372 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
5373 DAG.getIntPtrConstant(i));
5375 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
5378 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
5379 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
5380 /// done when every pair / quad of shuffle mask elements points to elements in
5381 /// the right sequence. e.g.
5382 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
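/// As an illustrative worked example (v8i16 rewritten with a scale of 2), the mask
/// above consists of consecutive pairs and can be expressed as the v4i32 mask
/// <1, 5, 0, 7> on the bitcasted operands.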
5384 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
5385 SelectionDAG &DAG, DebugLoc dl) {
5386 EVT VT = SVOp->getValueType(0);
5387 SDValue V1 = SVOp->getOperand(0);
5388 SDValue V2 = SVOp->getOperand(1);
5389 unsigned NumElems = VT.getVectorNumElements();
5390 unsigned NewWidth = (NumElems == 4) ? 2 : 4;
5392 switch (VT.getSimpleVT().SimpleTy) {
5393 default: assert(false && "Unexpected!");
5394 case MVT::v4f32: NewVT = MVT::v2f64; break;
5395 case MVT::v4i32: NewVT = MVT::v2i64; break;
5396 case MVT::v8i16: NewVT = MVT::v4i32; break;
5397 case MVT::v16i8: NewVT = MVT::v4i32; break;
5400 int Scale = NumElems / NewWidth;
5401 SmallVector<int, 8> MaskVec;
5402 for (unsigned i = 0; i < NumElems; i += Scale) {
5404 for (int j = 0; j < Scale; ++j) {
5405 int EltIdx = SVOp->getMaskElt(i+j);
5409 StartIdx = EltIdx - (EltIdx % Scale);
5410 if (EltIdx != StartIdx + j)
5414 MaskVec.push_back(-1);
5416 MaskVec.push_back(StartIdx / Scale);
5419 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
5420 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
5421 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
5424 /// getVZextMovL - Return a zero-extending vector move low node.
5426 static SDValue getVZextMovL(EVT VT, EVT OpVT,
5427 SDValue SrcOp, SelectionDAG &DAG,
5428 const X86Subtarget *Subtarget, DebugLoc dl) {
5429 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
5430 LoadSDNode *LD = NULL;
5431 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
5432 LD = dyn_cast<LoadSDNode>(SrcOp);
5434 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
5436 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
5437 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
5438 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5439 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
5440 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
5442 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
5443 return DAG.getNode(ISD::BITCAST, dl, VT,
5444 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
5445 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
5453 return DAG.getNode(ISD::BITCAST, dl, VT,
5454 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
5455 DAG.getNode(ISD::BITCAST, dl,
5459 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
5460 /// which could not be matched by any known target specific shuffle
5462 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
5466 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
5467 /// 4 elements, and match them with several different shuffle types.
5469 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
5470 SDValue V1 = SVOp->getOperand(0);
5471 SDValue V2 = SVOp->getOperand(1);
5472 DebugLoc dl = SVOp->getDebugLoc();
5473 EVT VT = SVOp->getValueType(0);
5475 assert(VT.getSizeInBits() == 128 && "Unsupported vector size");
5477 SmallVector<std::pair<int, int>, 8> Locs;
5479 SmallVector<int, 8> Mask1(4U, -1);
5480 SmallVector<int, 8> PermMask;
5481 SVOp->getMask(PermMask);
5485 for (unsigned i = 0; i != 4; ++i) {
5486 int Idx = PermMask[i];
5488 Locs[i] = std::make_pair(-1, -1);
5490 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
5492 Locs[i] = std::make_pair(0, NumLo);
5496 Locs[i] = std::make_pair(1, NumHi);
5498 Mask1[2+NumHi] = Idx;
5504 if (NumLo <= 2 && NumHi <= 2) {
5505 // If no more than two elements come from either vector, this can be
5506 // implemented with two shuffles. The first shuffle gathers the elements.
5507 // The second shuffle, which takes the first shuffle as both of its
5508 // vector operands, puts the elements into the right order.
5509 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
5511 SmallVector<int, 8> Mask2(4U, -1);
5513 for (unsigned i = 0; i != 4; ++i) {
5514 if (Locs[i].first == -1)
5517 unsigned Idx = (i < 2) ? 0 : 4;
5518 Idx += Locs[i].first * 2 + Locs[i].second;
5523 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
5524 } else if (NumLo == 3 || NumHi == 3) {
5525 // Otherwise, we must have three elements from one vector, call it X, and
5526 // one element from the other, call it Y. First, use a shufps to build an
5527 // intermediate vector with the one element from Y and the element from X
5528 // that will be in the same half in the final destination (the indexes don't
5529 // matter). Then, use a shufps to build the final vector, taking the half
5530 // containing the element from Y from the intermediate, and the other half
5533 // Normalize it so the 3 elements come from V1.
5534 CommuteVectorShuffleMask(PermMask, VT);
5538 // Find the element from V2.
5540 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
5541 int Val = PermMask[HiIndex];
5548 Mask1[0] = PermMask[HiIndex];
5550 Mask1[2] = PermMask[HiIndex^1];
5552 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
5555 Mask1[0] = PermMask[0];
5556 Mask1[1] = PermMask[1];
5557 Mask1[2] = HiIndex & 1 ? 6 : 4;
5558 Mask1[3] = HiIndex & 1 ? 4 : 6;
5559 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
5561 Mask1[0] = HiIndex & 1 ? 2 : 0;
5562 Mask1[1] = HiIndex & 1 ? 0 : 2;
5563 Mask1[2] = PermMask[2];
5564 Mask1[3] = PermMask[3];
5569 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
5573 // Break it into (shuffle shuffle_hi, shuffle_lo).
5576 SmallVector<int,8> LoMask(4U, -1);
5577 SmallVector<int,8> HiMask(4U, -1);
5579 SmallVector<int,8> *MaskPtr = &LoMask;
5580 unsigned MaskIdx = 0;
5583 for (unsigned i = 0; i != 4; ++i) {
5590 int Idx = PermMask[i];
5592 Locs[i] = std::make_pair(-1, -1);
5593 } else if (Idx < 4) {
5594 Locs[i] = std::make_pair(MaskIdx, LoIdx);
5595 (*MaskPtr)[LoIdx] = Idx;
5598 Locs[i] = std::make_pair(MaskIdx, HiIdx);
5599 (*MaskPtr)[HiIdx] = Idx;
5604 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
5605 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
5606 SmallVector<int, 8> MaskOps;
5607 for (unsigned i = 0; i != 4; ++i) {
5608 if (Locs[i].first == -1) {
5609 MaskOps.push_back(-1);
5611 unsigned Idx = Locs[i].first * 4 + Locs[i].second;
5612 MaskOps.push_back(Idx);
5615 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
5618 static bool MayFoldVectorLoad(SDValue V) {
5619 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
5620 V = V.getOperand(0);
5621 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5622 V = V.getOperand(0);
5628 // FIXME: the version above should always be used. Since there's
5629 // a bug where several vector shuffles can't be folded because the
5630 // DAG is not updated during lowering and a node claims to have two
5631 // uses while it only has one, use this version, and let isel match
5632 // another instruction if the load really happens to have more than
5633 // one use. Remove this version after this bug gets fixed.
5634 // rdar://8434668, PR8156
5635 static bool RelaxedMayFoldVectorLoad(SDValue V) {
5636 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
5637 V = V.getOperand(0);
5638 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5639 V = V.getOperand(0);
5640 if (ISD::isNormalLoad(V.getNode()))
5645 /// CanXFormVExtractWithShuffleIntoLoad - Check if the current shuffle is
5646 /// used by a vector extract, and if both can later be optimized into a
5647 /// single load. This is done in visitEXTRACT_VECTOR_ELT and the conditions
5648 /// are checked here because otherwise a target specific shuffle node would
5649 /// be emitted for this shuffle, and the optimization would not be done.
5650 /// FIXME: This is probably not the best approach, but it fixes the problem
5651 /// until the right path is decided.
5653 bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
5654 const TargetLowering &TLI) {
5655 EVT VT = V.getValueType();
5656 ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);
5658 // Be sure that the vector shuffle is present in a pattern like this:
5659 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
5663 SDNode *N = *V.getNode()->use_begin();
5664 if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
5667 SDValue EltNo = N->getOperand(1);
5668 if (!isa<ConstantSDNode>(EltNo))
5671 // If the bit convert changed the number of elements, it is unsafe
5672 // to examine the mask.
5673 bool HasShuffleIntoBitcast = false;
5674 if (V.getOpcode() == ISD::BITCAST) {
5675 EVT SrcVT = V.getOperand(0).getValueType();
5676 if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
5678 V = V.getOperand(0);
5679 HasShuffleIntoBitcast = true;
5682 // Select the input vector, guarding against an out-of-range extract index.
5683 unsigned NumElems = VT.getVectorNumElements();
5684 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5685 int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
5686 V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
5688 // Skip one more bit_convert if necessary
5689 if (V.getOpcode() == ISD::BITCAST)
5690 V = V.getOperand(0);
5692 if (ISD::isNormalLoad(V.getNode())) {
5693 // Is the original load suitable?
5694 LoadSDNode *LN0 = cast<LoadSDNode>(V);
5696 // FIXME: avoid the multi-use bug that is preventing lots of
5697 // foldings from being detected. This is still wrong of course, but
5698 // it gives the temporarily desired behavior, and if it happens that
5699 // the load really has more uses, during isel it will not fold, and
5700 // will generate poor code.
5701 if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
5704 if (!HasShuffleIntoBitcast)
5707 // If there's a bitcast before the shuffle, check if the load type and
5708 // alignment are valid.
5709 unsigned Align = LN0->getAlignment();
5711 TLI.getTargetData()->getABITypeAlignment(
5712 VT.getTypeForEVT(*DAG.getContext()));
5714 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
5722 SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
5723 EVT VT = Op.getValueType();
5725 // Canonicalize to v2f64.
5726 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
5727 return DAG.getNode(ISD::BITCAST, dl, VT,
5728 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
5733 SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
5735 SDValue V1 = Op.getOperand(0);
5736 SDValue V2 = Op.getOperand(1);
5737 EVT VT = Op.getValueType();
5739 assert(VT != MVT::v2i64 && "unsupported shuffle type");
5741 if (HasSSE2 && VT == MVT::v2f64)
5742 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
5745 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG);
5749 SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
5750 SDValue V1 = Op.getOperand(0);
5751 SDValue V2 = Op.getOperand(1);
5752 EVT VT = Op.getValueType();
5754 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
5755 "unsupported shuffle type");
5757 if (V2.getOpcode() == ISD::UNDEF)
5761 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
5765 SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
5766 SDValue V1 = Op.getOperand(0);
5767 SDValue V2 = Op.getOperand(1);
5768 EVT VT = Op.getValueType();
5769 unsigned NumElems = VT.getVectorNumElements();
5771 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
5772 // operand of these instructions is only memory, so check if there's a
5773 // potential load folding here; otherwise use SHUFPS or MOVSD to match the
5775 bool CanFoldLoad = false;
5777 // Trivial case, when V2 comes from a load.
5778 if (MayFoldVectorLoad(V2))
5781 // When V1 is a load, it can be folded later into a store in isel, example:
5782 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
5784 // (MOVLPSmr addr:$src1, VR128:$src2)
5785 // So, recognize this potential and also use MOVLPS or MOVLPD
5786 if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
5789 // They can't both be memory operations, though.
5790 if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
5791 CanFoldLoad = false;
5794 if (HasSSE2 && NumElems == 2)
5795 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
5798 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
5801 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5802 // movl and movlp will both match v2i64, but v2i64 is never matched by
5803 // movl earlier because we make it strict to avoid messing with the movlp load
5804 // folding logic (see the code above the getMOVLP call). Match it here then;
5805 // this is horrible, but it will stay like this until we move all shuffle
5806 // matching to x86 specific nodes. Note that for the 1st condition all
5807 // types are matched with movsd.
5808 if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp))
5809 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
5811 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
5814 assert(VT != MVT::v4i32 && "unsupported shuffle type");
5816 // Invert the operand order and use SHUFPS to match it.
5817 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1,
5818 X86::getShuffleSHUFImmediate(SVOp), DAG);
5821 static inline unsigned getUNPCKLOpcode(EVT VT) {
5822 switch(VT.getSimpleVT().SimpleTy) {
5823 case MVT::v4i32: return X86ISD::PUNPCKLDQ;
5824 case MVT::v2i64: return X86ISD::PUNPCKLQDQ;
5825 case MVT::v4f32: return X86ISD::UNPCKLPS;
5826 case MVT::v2f64: return X86ISD::UNPCKLPD;
5827 case MVT::v8f32: return X86ISD::VUNPCKLPSY;
5828 case MVT::v4f64: return X86ISD::VUNPCKLPDY;
5829 case MVT::v16i8: return X86ISD::PUNPCKLBW;
5830 case MVT::v8i16: return X86ISD::PUNPCKLWD;
5832 llvm_unreachable("Unknown type for unpckl");
5837 static inline unsigned getUNPCKHOpcode(EVT VT) {
5838 switch(VT.getSimpleVT().SimpleTy) {
5839 case MVT::v4i32: return X86ISD::PUNPCKHDQ;
5840 case MVT::v2i64: return X86ISD::PUNPCKHQDQ;
5841 case MVT::v4f32: return X86ISD::UNPCKHPS;
5842 case MVT::v2f64: return X86ISD::UNPCKHPD;
5843 case MVT::v8f32: return X86ISD::VUNPCKHPSY;
5844 case MVT::v4f64: return X86ISD::VUNPCKHPDY;
5845 case MVT::v16i8: return X86ISD::PUNPCKHBW;
5846 case MVT::v8i16: return X86ISD::PUNPCKHWD;
5848 llvm_unreachable("Unknown type for unpckh");
5853 static inline unsigned getVPERMILOpcode(EVT VT) {
5854 switch(VT.getSimpleVT().SimpleTy) {
5856 case MVT::v4f32: return X86ISD::VPERMILPS;
5858 case MVT::v2f64: return X86ISD::VPERMILPD;
5860 case MVT::v8f32: return X86ISD::VPERMILPSY;
5862 case MVT::v4f64: return X86ISD::VPERMILPDY;
5864 llvm_unreachable("Unknown type for vpermil");
5870 SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
5871 const TargetLowering &TLI,
5872 const X86Subtarget *Subtarget) {
5873 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5874 EVT VT = Op.getValueType();
5875 DebugLoc dl = Op.getDebugLoc();
5876 SDValue V1 = Op.getOperand(0);
5877 SDValue V2 = Op.getOperand(1);
5879 if (isZeroShuffle(SVOp))
5880 return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
5882 // Handle splat operations
5883 if (SVOp->isSplat()) {
5884 unsigned NumElem = VT.getVectorNumElements();
5885 // Special case, this is the only place now where it's allowed to return
5886 // a vector_shuffle operation without using a target specific node, because
5887 // *hopefully* it will be optimized away by the dag combiner. FIXME: should
5888 // this be moved to DAGCombine instead?
5889 if (NumElem <= 4 && CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
5892 // Since there's no native support for scalar_to_vector for 256-bit AVX, a
5893 // 128-bit scalar_to_vector + INSERT_SUBVECTOR is generated. Recognize this
5894 // idiom and do the shuffle before the insertion, this yields less
5895 // instructions in the end.
5896 if (VT.is256BitVector() &&
5897 V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
5898 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
5899 V1.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR)
5900 return PromoteVectorToScalarSplat(SVOp, DAG);
5902 // Handle splats by matching through known shuffle masks
5903 if ((VT.is128BitVector() && NumElem <= 4) ||
5904 (VT.is256BitVector() && NumElem <= 8))
5907 // All i16 and i8 vector types can't be used directly by a generic shuffle
5908 // instruction because the target has no such instruction. Generate shuffles
5909 // which repeat i16 and i8 several times until they fit in i32, and then can
5910 // be manipulated by target supported shuffles. After the insertion of the
5911 // necessary shuffles, the result is bitcasted back to v4f32 or v8f32.
5912 return PromoteSplat(SVOp, DAG);
5915 // If the shuffle can be profitably rewritten as a narrower shuffle, then
5917 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
5918 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5919 if (NewOp.getNode())
5920 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
5921 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
5922 // FIXME: Figure out a cleaner way to do this.
5923 // Try to make use of movq to zero out the top part.
5924 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
5925 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5926 if (NewOp.getNode()) {
5927 if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
5928 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
5929 DAG, Subtarget, dl);
5931 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
5932 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5933 if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
5934 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
5935 DAG, Subtarget, dl);
5942 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
5943 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5944 SDValue V1 = Op.getOperand(0);
5945 SDValue V2 = Op.getOperand(1);
5946 EVT VT = Op.getValueType();
5947 DebugLoc dl = Op.getDebugLoc();
5948 unsigned NumElems = VT.getVectorNumElements();
5949 bool isMMX = VT.getSizeInBits() == 64;
5950 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
5951 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
5952 bool V1IsSplat = false;
5953 bool V2IsSplat = false;
5954 bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
5955 bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
5956 bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
5957 MachineFunction &MF = DAG.getMachineFunction();
5958 bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
5960 // Shuffle operations on MMX are not supported.
5964 // Vector shuffle lowering takes 3 steps:
5966 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
5967 // narrowing and commutation of operands should be handled.
5968 // 2) Matching of shuffles with known shuffle masks to x86 target specific
5970 // 3) Rewriting of unmatched masks into new generic shuffle operations,
5971 // so the shuffle can be broken into other shuffles and the legalizer can
5972 // try the lowering again.
5974 // The general idea is that no vector_shuffle operation should be left to
5975 // be matched during isel; all of them must be converted to a target specific
5978 // Normalize the input vectors. Here splats, zeroed vectors, profitable
5979 // narrowing and commutation of operands should be handled. The actual code
5980 // doesn't include all of those, work in progress...
5981 SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
5982 if (NewOp.getNode())
5985 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
5986 // unpckh_undef). Only use pshufd if speed is more important than size.
5987 if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
5988 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
5989 if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
5990 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
5992 if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
5993 RelaxedMayFoldVectorLoad(V1))
5994 return getMOVDDup(Op, dl, V1, DAG);
5996 if (X86::isMOVHLPS_v_undef_Mask(SVOp))
5997 return getMOVHighToLow(Op, dl, DAG);
5999 // Used to match splats
6000 if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
6001 (VT == MVT::v2f64 || VT == MVT::v2i64))
6002 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
6004 if (X86::isPSHUFDMask(SVOp)) {
6005 // The actual implementation will match the mask in the if above and then
6006 // during isel it can match several different instructions, not only pshufd
6007 // as its name says. Sad but true; emulate the behavior for now...
6008 if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
6009 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
6011 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
6013 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
6014 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
6016 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
6017 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1,
6020 if (VT == MVT::v4f32)
6021 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1,
6025 // Check if this can be converted into a logical shift.
6026 bool isLeft = false;
6029 bool isShift = getSubtarget()->hasSSE2() &&
6030 isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
6031 if (isShift && ShVal.hasOneUse()) {
6032 // If the shifted value has multiple uses, it may be cheaper to use
6033 // v_set0 + movlhps or movhlps, etc.
6034 EVT EltVT = VT.getVectorElementType();
6035 ShAmt *= EltVT.getSizeInBits();
6036 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
6039 if (X86::isMOVLMask(SVOp)) {
6042 if (ISD::isBuildVectorAllZeros(V1.getNode()))
6043 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
6044 if (!X86::isMOVLPMask(SVOp)) {
6045 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
6046 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
6048 if (VT == MVT::v4i32 || VT == MVT::v4f32)
6049 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
6053 // FIXME: fold these into legal mask.
6054 if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
6055 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
6057 if (X86::isMOVHLPSMask(SVOp))
6058 return getMOVHighToLow(Op, dl, DAG);
6060 if (X86::isMOVSHDUPMask(SVOp, Subtarget))
6061 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
6063 if (X86::isMOVSLDUPMask(SVOp, Subtarget))
6064 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
6066 if (X86::isMOVLPMask(SVOp))
6067 return getMOVLP(Op, dl, DAG, HasSSE2);
6069 if (ShouldXformToMOVHLPS(SVOp) ||
6070 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
6071 return CommuteVectorShuffle(SVOp, DAG);
6074 // No better options. Use a vshl / vsrl.
6075 EVT EltVT = VT.getVectorElementType();
6076 ShAmt *= EltVT.getSizeInBits();
6077 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
6080 bool Commuted = false;
6081 // FIXME: This should also accept a bitcast of a splat? Be careful, not
6082 // 1,1,1,1 -> v8i16 though.
6083 V1IsSplat = isSplatVector(V1.getNode());
6084 V2IsSplat = isSplatVector(V2.getNode());
6086 // Canonicalize the splat or undef, if present, to be on the RHS.
6087 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
6088 Op = CommuteVectorShuffle(SVOp, DAG);
6089 SVOp = cast<ShuffleVectorSDNode>(Op);
6090 V1 = SVOp->getOperand(0);
6091 V2 = SVOp->getOperand(1);
6092 std::swap(V1IsSplat, V2IsSplat);
6093 std::swap(V1IsUndef, V2IsUndef);
6097 if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
6098 // Shuffling the low element of v1 into undef; just return v1.
6101 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
6102 // the instruction selector will not match, so get a canonical MOVL with
6103 // swapped operands to undo the commute.
6104 return getMOVL(DAG, dl, VT, V2, V1);
6107 if (X86::isUNPCKLMask(SVOp))
6108 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
6110 if (X86::isUNPCKHMask(SVOp))
6111 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
6114 // Normalize the mask so all entries that point to V2 point to its first
6115 // element, then try to match unpck{h|l} again. If a match is found, return a
6116 // new vector_shuffle with the corrected mask.
6117 SDValue NewMask = NormalizeMask(SVOp, DAG);
6118 ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
6119 if (NSVOp != SVOp) {
6120 if (X86::isUNPCKLMask(NSVOp, true)) {
6122 } else if (X86::isUNPCKHMask(NSVOp, true)) {
6129 // Commute it back and try unpck* again.
6130 // FIXME: this seems wrong.
6131 SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
6132 ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
6134 if (X86::isUNPCKLMask(NewSVOp))
6135 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
6137 if (X86::isUNPCKHMask(NewSVOp))
6138 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
6141 // Normalize the node to match x86 shuffle ops if needed
6142 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
6143 return CommuteVectorShuffle(SVOp, DAG);
6145 // The checks below are all present in isShuffleMaskLegal, but they are
6146 // inlined here right now to enable us to directly emit target specific
6147 // nodes, and remove one by one until they don't return Op anymore.
6148 SmallVector<int, 16> M;
6151 if (isPALIGNRMask(M, VT, HasSSSE3))
6152 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
6153 X86::getShufflePALIGNRImmediate(SVOp),
6156 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
6157 SVOp->getSplatIndex() == 0 && V2IsUndef) {
6158 if (VT == MVT::v2f64)
6159 return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG);
6160 if (VT == MVT::v2i64)
6161 return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG);
6164 if (isPSHUFHWMask(M, VT))
6165 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
6166 X86::getShufflePSHUFHWImmediate(SVOp),
6169 if (isPSHUFLWMask(M, VT))
6170 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
6171 X86::getShufflePSHUFLWImmediate(SVOp),
6174 if (isSHUFPMask(M, VT)) {
6175 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
6176 if (VT == MVT::v4f32 || VT == MVT::v4i32)
6177 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2,
6179 if (VT == MVT::v2f64 || VT == MVT::v2i64)
6180 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2,
6184 if (X86::isUNPCKL_v_undef_Mask(SVOp))
6185 return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
6186 if (X86::isUNPCKH_v_undef_Mask(SVOp))
6187 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
6189 //===--------------------------------------------------------------------===//
6190 // Generate target specific nodes for 128 or 256-bit shuffles only
6191 // supported in the AVX instruction set.
6194 // Handle VPERMILPS* permutations
6195 if (isVPERMILPSMask(M, VT, Subtarget))
6196 return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
6197 getShuffleVPERMILPSImmediate(SVOp), DAG);
6199 // Handle VPERMILPD* permutations
6200 if (isVPERMILPDMask(M, VT, Subtarget))
6201 return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
6202 getShuffleVPERMILPDImmediate(SVOp), DAG);
6204 //===--------------------------------------------------------------------===//
6205 // Since no target specific shuffle was selected for this generic one,
6206 // lower it into other known shuffles. FIXME: this isn't true yet, but
6207 // this is the plan.
6210 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
6211 if (VT == MVT::v8i16) {
6212 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
6213 if (NewOp.getNode())
6217 if (VT == MVT::v16i8) {
6218 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
6219 if (NewOp.getNode())
6223 // Handle all 128-bit wide vectors with 4 elements, and match them with
6224 // several different shuffle types.
6225 if (NumElems == 4 && VT.getSizeInBits() == 128)
6226 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
6228 // Handle general 256-bit shuffles
6229 if (VT.is256BitVector())
6230 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
6236 X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
6237 SelectionDAG &DAG) const {
6238 EVT VT = Op.getValueType();
6239 DebugLoc dl = Op.getDebugLoc();
6241 if (Op.getOperand(0).getValueType().getSizeInBits() != 128)
6244 if (VT.getSizeInBits() == 8) {
6245 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
6246 Op.getOperand(0), Op.getOperand(1));
6247 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
6248 DAG.getValueType(VT));
6249 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
6250 } else if (VT.getSizeInBits() == 16) {
6251 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6252 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
6254 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
6255 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
6256 DAG.getNode(ISD::BITCAST, dl,
6260 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
6261 Op.getOperand(0), Op.getOperand(1));
6262 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
6263 DAG.getValueType(VT));
6264 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
6265 } else if (VT == MVT::f32) {
6266 // EXTRACTPS outputs to a GPR32 register, which will require a movd to copy
6267 // the result back to an FR32 register. It's only worth matching if the
6268 // result has a single use which is a store or a bitcast to i32. And in
6269 // the case of a store, it's not worth it if the index is a constant 0,
6270 // because a MOVSSmr can be used instead, which is smaller and faster.
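// Informal example: extracting element 1 of a v4f32 that feeds only a store is a
// good candidate (extractps + store), whereas extracting element 0 for a store is
// better served by a plain movss.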
6271 if (!Op.hasOneUse())
6273 SDNode *User = *Op.getNode()->use_begin();
6274 if ((User->getOpcode() != ISD::STORE ||
6275 (isa<ConstantSDNode>(Op.getOperand(1)) &&
6276 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
6277 (User->getOpcode() != ISD::BITCAST ||
6278 User->getValueType(0) != MVT::i32))
6280 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
6281 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
6284 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
6285 } else if (VT == MVT::i32) {
6286 // ExtractPS works with constant index.
6287 if (isa<ConstantSDNode>(Op.getOperand(1)))
6295 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
6296 SelectionDAG &DAG) const {
6297 if (!isa<ConstantSDNode>(Op.getOperand(1)))
6300 SDValue Vec = Op.getOperand(0);
6301 EVT VecVT = Vec.getValueType();
6303 // If this is a 256-bit vector result, first extract the 128-bit vector and
6304 // then extract the element from the 128-bit vector.
6305 if (VecVT.getSizeInBits() == 256) {
6306 DebugLoc dl = Op.getNode()->getDebugLoc();
6307 unsigned NumElems = VecVT.getVectorNumElements();
6308 SDValue Idx = Op.getOperand(1);
6309 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
6311 // Get the 128-bit vector.
6312 bool Upper = IdxVal >= NumElems/2;
6313 Vec = Extract128BitVector(Vec,
6314 DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32), DAG, dl);
6316 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
6317 Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx);
6320 assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length");
6322 if (Subtarget->hasSSE41() || Subtarget->hasAVX()) {
6323 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
6328 EVT VT = Op.getValueType();
6329 DebugLoc dl = Op.getDebugLoc();
6330 // TODO: handle v16i8.
6331 if (VT.getSizeInBits() == 16) {
6332 SDValue Vec = Op.getOperand(0);
6333 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6335 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
6336 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
6337 DAG.getNode(ISD::BITCAST, dl,
6340 // Transform it so it matches pextrw, which produces a 32-bit result.
6341 EVT EltVT = MVT::i32;
6342 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
6343 Op.getOperand(0), Op.getOperand(1));
6344 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
6345 DAG.getValueType(VT));
6346 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
6347 } else if (VT.getSizeInBits() == 32) {
6348 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6352 // SHUFPS the element to the lowest double word, then movss.
6353 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
6354 EVT VVT = Op.getOperand(0).getValueType();
6355 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
6356 DAG.getUNDEF(VVT), Mask);
6357 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
6358 DAG.getIntPtrConstant(0));
6359 } else if (VT.getSizeInBits() == 64) {
6360 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
6361 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
6362 // to match extract_elt for f64.
6363 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6367 // UNPCKHPD the element to the lowest double word, then movsd.
6368 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
6369 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
6370 int Mask[2] = { 1, -1 };
6371 EVT VVT = Op.getOperand(0).getValueType();
6372 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
6373 DAG.getUNDEF(VVT), Mask);
6374 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
6375 DAG.getIntPtrConstant(0));
6382 X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
6383 SelectionDAG &DAG) const {
6384 EVT VT = Op.getValueType();
6385 EVT EltVT = VT.getVectorElementType();
6386 DebugLoc dl = Op.getDebugLoc();
6388 SDValue N0 = Op.getOperand(0);
6389 SDValue N1 = Op.getOperand(1);
6390 SDValue N2 = Op.getOperand(2);
6392 if (VT.getSizeInBits() == 256)
6395 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
6396 isa<ConstantSDNode>(N2)) {
6398 if (VT == MVT::v8i16)
6399 Opc = X86ISD::PINSRW;
6400 else if (VT == MVT::v16i8)
6401 Opc = X86ISD::PINSRB;
6403 Opc = X86ISD::PINSRB;
6405 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
6407 if (N1.getValueType() != MVT::i32)
6408 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
6409 if (N2.getValueType() != MVT::i32)
6410 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
6411 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
6412 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
6413 // Bits [7:6] of the constant are the source select. This will always be
6414 // zero here. The DAG Combiner may combine an extract_elt index into these
6415 // bits. For example (insert (extract, 3), 2) could be matched by putting
6416 // the '3' into bits [7:6] of X86ISD::INSERTPS.
6417 // Bits [5:4] of the constant are the destination select. This is the
6418 // value of the incoming immediate.
6419 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
6420 // combine either bitwise AND or insert of float 0.0 to set these bits.
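// For example (illustration only): inserting into destination element 2 with no
// zeroing yields an immediate of 0x20 after the shift below (source select 0,
// destination select 2, zero mask 0).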
6421 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
6422 // Create this as a scalar-to-vector.
6423 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
6424 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
6425 } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
6426 // PINSR* works with constant index.
6433 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
6434 EVT VT = Op.getValueType();
6435 EVT EltVT = VT.getVectorElementType();
6437 DebugLoc dl = Op.getDebugLoc();
6438 SDValue N0 = Op.getOperand(0);
6439 SDValue N1 = Op.getOperand(1);
6440 SDValue N2 = Op.getOperand(2);
6442 // If this is a 256-bit vector result, first extract the 128-bit vector,
6443 // insert the element into the extracted half and then place it back.
6444 if (VT.getSizeInBits() == 256) {
6445 if (!isa<ConstantSDNode>(N2))
6448 // Get the desired 128-bit vector half.
6449 unsigned NumElems = VT.getVectorNumElements();
6450 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
6451 bool Upper = IdxVal >= NumElems/2;
6452 SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32);
6453 SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl);
6455 // Insert the element into the desired half.
6456 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V,
6457 N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2);
6459 // Insert the changed part back to the 256-bit vector
6460 return Insert128BitVector(N0, V, Ins128Idx, DAG, dl);
6463 if (Subtarget->hasSSE41() || Subtarget->hasAVX())
6464 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
6466 if (EltVT == MVT::i8)
6469 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
6470 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
6471 // as its second argument.
6472 if (N1.getValueType() != MVT::i32)
6473 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
6474 if (N2.getValueType() != MVT::i32)
6475 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
6476 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
6482 X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6483 LLVMContext *Context = DAG.getContext();
6484 DebugLoc dl = Op.getDebugLoc();
6485 EVT OpVT = Op.getValueType();
6487 // If this is a 256-bit vector result, first insert into a 128-bit
6488 // vector and then insert into the 256-bit vector.
6489 if (OpVT.getSizeInBits() > 128) {
6490 // Insert into a 128-bit vector.
6491 EVT VT128 = EVT::getVectorVT(*Context,
6492 OpVT.getVectorElementType(),
6493 OpVT.getVectorNumElements() / 2);
6495 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
6497 // Insert the 128-bit vector.
6498 return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op,
6499 DAG.getConstant(0, MVT::i32),
6503 if (Op.getValueType() == MVT::v1i64 &&
6504 Op.getOperand(0).getValueType() == MVT::i64)
6505 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
6507 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
6508 assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
6509 "Expected an SSE type!");
6510 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
6511 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
6514 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
6515 // a simple subregister reference or explicit instructions to grab
6516 // upper bits of a vector.
6518 X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
6519 if (Subtarget->hasAVX()) {
6520 DebugLoc dl = Op.getNode()->getDebugLoc();
6521 SDValue Vec = Op.getNode()->getOperand(0);
6522 SDValue Idx = Op.getNode()->getOperand(1);
6524 if (Op.getNode()->getValueType(0).getSizeInBits() == 128
6525 && Vec.getNode()->getValueType(0).getSizeInBits() == 256) {
6526 return Extract128BitVector(Vec, Idx, DAG, dl);
6532 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
6533 // simple superregister reference or explicit instructions to insert
6534 // the upper bits of a vector.
6536 X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
6537 if (Subtarget->hasAVX()) {
6538 DebugLoc dl = Op.getNode()->getDebugLoc();
6539 SDValue Vec = Op.getNode()->getOperand(0);
6540 SDValue SubVec = Op.getNode()->getOperand(1);
6541 SDValue Idx = Op.getNode()->getOperand(2);
6543 if (Op.getNode()->getValueType(0).getSizeInBits() == 256
6544 && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) {
6545 return Insert128BitVector(Vec, SubVec, Idx, DAG, dl);
6551 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
6552 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
6553 // one of the above mentioned nodes. It has to be wrapped because otherwise
6554 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
6555 // be used to form an addressing mode. These wrapped nodes will be selected
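// Informal illustration: on 32-bit PIC a constant pool reference ends up roughly
// as (add GlobalBaseReg, (X86ISD::Wrapper (TargetConstantPool ... MO_PIC_BASE_OFFSET))),
// matching the PIC adjustment emitted below.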
6558 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
6559 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
6561 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
6563 unsigned char OpFlag = 0;
6564 unsigned WrapperKind = X86ISD::Wrapper;
6565 CodeModel::Model M = getTargetMachine().getCodeModel();
6567 if (Subtarget->isPICStyleRIPRel() &&
6568 (M == CodeModel::Small || M == CodeModel::Kernel))
6569 WrapperKind = X86ISD::WrapperRIP;
6570 else if (Subtarget->isPICStyleGOT())
6571 OpFlag = X86II::MO_GOTOFF;
6572 else if (Subtarget->isPICStyleStubPIC())
6573 OpFlag = X86II::MO_PIC_BASE_OFFSET;
6575 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
6577 CP->getOffset(), OpFlag);
6578 DebugLoc DL = CP->getDebugLoc();
6579 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
6580 // With PIC, the address is actually $g + Offset.
6581 if (OpFlag) {
6582 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
6583 DAG.getNode(X86ISD::GlobalBaseReg,
6584 DebugLoc(), getPointerTy()),
6585 Result);
6586 }
6588 return Result;
6589 }
6591 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
6592 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
6594 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
6595 // global base reg.
6596 unsigned char OpFlag = 0;
6597 unsigned WrapperKind = X86ISD::Wrapper;
6598 CodeModel::Model M = getTargetMachine().getCodeModel();
6600 if (Subtarget->isPICStyleRIPRel() &&
6601 (M == CodeModel::Small || M == CodeModel::Kernel))
6602 WrapperKind = X86ISD::WrapperRIP;
6603 else if (Subtarget->isPICStyleGOT())
6604 OpFlag = X86II::MO_GOTOFF;
6605 else if (Subtarget->isPICStyleStubPIC())
6606 OpFlag = X86II::MO_PIC_BASE_OFFSET;
6608 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
6609 OpFlag);
6610 DebugLoc DL = JT->getDebugLoc();
6611 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
6613 // With PIC, the address is actually $g + Offset.
6614 if (OpFlag)
6615 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
6616 DAG.getNode(X86ISD::GlobalBaseReg,
6617 DebugLoc(), getPointerTy()),
6618 Result);
6620 return Result;
6621 }
6623 SDValue
6624 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
6625 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
6627 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
6628 // global base reg.
6629 unsigned char OpFlag = 0;
6630 unsigned WrapperKind = X86ISD::Wrapper;
6631 CodeModel::Model M = getTargetMachine().getCodeModel();
6633 if (Subtarget->isPICStyleRIPRel() &&
6634 (M == CodeModel::Small || M == CodeModel::Kernel))
6635 WrapperKind = X86ISD::WrapperRIP;
6636 else if (Subtarget->isPICStyleGOT())
6637 OpFlag = X86II::MO_GOTOFF;
6638 else if (Subtarget->isPICStyleStubPIC())
6639 OpFlag = X86II::MO_PIC_BASE_OFFSET;
6641 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
6643 DebugLoc DL = Op.getDebugLoc();
6644 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
6647 // With PIC, the address is actually $g + Offset.
6648 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
6649 !Subtarget->is64Bit()) {
6650 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
6651 DAG.getNode(X86ISD::GlobalBaseReg,
6652 DebugLoc(), getPointerTy()),
6653 Result);
6654 }
6656 return Result;
6657 }
6659 SDValue
6660 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
6661 // Create the TargetBlockAddressAddress node.
6662 unsigned char OpFlags =
6663 Subtarget->ClassifyBlockAddressReference();
6664 CodeModel::Model M = getTargetMachine().getCodeModel();
6665 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
6666 DebugLoc dl = Op.getDebugLoc();
6667 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
6668 /*isTarget=*/true, OpFlags);
6670 if (Subtarget->isPICStyleRIPRel() &&
6671 (M == CodeModel::Small || M == CodeModel::Kernel))
6672 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
6673 else
6674 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
6676 // With PIC, the address is actually $g + Offset.
6677 if (isGlobalRelativeToPICBase(OpFlags)) {
6678 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
6679 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
6680 Result);
6681 }
6683 return Result;
6684 }
6686 SDValue
6687 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
6688 int64_t Offset,
6689 SelectionDAG &DAG) const {
6690 // Create the TargetGlobalAddress node, folding in the constant
6691 // offset if it is legal.
6692 unsigned char OpFlags =
6693 Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
6694 CodeModel::Model M = getTargetMachine().getCodeModel();
6695 SDValue Result;
6696 if (OpFlags == X86II::MO_NO_FLAG &&
6697 X86::isOffsetSuitableForCodeModel(Offset, M)) {
6698 // A direct static reference to a global.
6699 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
6700 Offset = 0;
6701 } else {
6702 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
6703 }
6705 if (Subtarget->isPICStyleRIPRel() &&
6706 (M == CodeModel::Small || M == CodeModel::Kernel))
6707 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
6708 else
6709 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
6711 // With PIC, the address is actually $g + Offset.
6712 if (isGlobalRelativeToPICBase(OpFlags)) {
6713 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
6714 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
6715 Result);
6716 }
6718 // For globals that require a load from a stub to get the address, emit the
6719 // load.
6720 if (isGlobalStubReference(OpFlags))
6721 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
6722 MachinePointerInfo::getGOT(), false, false, 0);
6724 // If there was a non-zero offset that we didn't fold, create an explicit
6725 // addition for it.
6726 if (Offset != 0)
6727 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
6728 DAG.getConstant(Offset, getPointerTy()));
6730 return Result;
6731 }
6733 SDValue
6734 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
6735 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
6736 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
6737 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
6738 }
6740 static SDValue
6741 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
6742 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
6743 unsigned char OperandFlags) {
6744 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6745 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6746 DebugLoc dl = GA->getDebugLoc();
6747 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
6748 GA->getValueType(0),
6749 GA->getOffset(),
6750 OperandFlags);
6751 if (InFlag) {
6752 SDValue Ops[] = { Chain, TGA, *InFlag };
6753 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
6754 } else {
6755 SDValue Ops[] = { Chain, TGA };
6756 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
6757 }
6759 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
6760 MFI->setAdjustsStack(true);
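// The runtime returns the TLS address in ReturnReg (EAX or RAX), which the
// CopyFromReg below reads out of the glued TLSADDR call.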
6762 SDValue Flag = Chain.getValue(1);
6763 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
6764 }
6766 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
6767 static SDValue
6768 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
6769 const EVT PtrVT) {
6770 SDValue InFlag;
6771 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
6772 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
6773 DAG.getNode(X86ISD::GlobalBaseReg,
6774 DebugLoc(), PtrVT), InFlag);
6775 InFlag = Chain.getValue(1);
6777 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
6778 }
6780 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
6781 static SDValue
6782 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
6783 const EVT PtrVT) {
6784 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
6785 X86::RAX, X86II::MO_TLSGD);
6786 }
6788 // Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
6789 // "local exec" model.
6790 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
6791 const EVT PtrVT, TLSModel::Model model,
6792 bool is64Bit) {
6793 DebugLoc dl = GA->getDebugLoc();
6795 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
6796 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
6797 is64Bit ? 257 : 256));
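// Address space 256 is %gs and 257 is %fs in the X86 backend; a null pointer
// in that space is the thread pointer itself (%gs:0 / %fs:0).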
6799 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
6800 DAG.getIntPtrConstant(0),
6801 MachinePointerInfo(Ptr), false, false, 0);
6803 unsigned char OperandFlags = 0;
6804 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
6806 unsigned WrapperKind = X86ISD::Wrapper;
6807 if (model == TLSModel::LocalExec) {
6808 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
6809 } else if (is64Bit) {
6810 assert(model == TLSModel::InitialExec);
6811 OperandFlags = X86II::MO_GOTTPOFF;
6812 WrapperKind = X86ISD::WrapperRIP;
6813 } else {
6814 assert(model == TLSModel::InitialExec);
6815 OperandFlags = X86II::MO_INDNTPOFF;
6816 }
6818 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
6820 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
6821 GA->getValueType(0),
6822 GA->getOffset(), OperandFlags);
6823 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
6825 if (model == TLSModel::InitialExec)
6826 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
6827 MachinePointerInfo::getGOT(), false, false, 0);
6829 // The address of the thread local variable is the add of the thread
6830 // pointer with the offset of the variable.
6831 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
6832 }
6834 SDValue
6835 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
6837 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
6838 const GlobalValue *GV = GA->getGlobal();
6840 if (Subtarget->isTargetELF()) {
6841 // TODO: implement the "local dynamic" model
6842 // TODO: implement the "initial exec"model for pic executables
6844 // If GV is an alias then use the aliasee for determining
6845 // thread-localness.
6846 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
6847 GV = GA->resolveAliasedGlobal(false);
6849 TLSModel::Model model
6850 = getTLSModel(GV, getTargetMachine().getRelocationModel());
6852 switch (model) {
6853 case TLSModel::GeneralDynamic:
6854 case TLSModel::LocalDynamic: // not implemented
6855 if (Subtarget->is64Bit())
6856 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
6857 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
6859 case TLSModel::InitialExec:
6860 case TLSModel::LocalExec:
6861 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
6862 Subtarget->is64Bit());
6863 }
6864 } else if (Subtarget->isTargetDarwin()) {
6865 // Darwin only has one model of TLS. Lower to that.
6866 unsigned char OpFlag = 0;
6867 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
6868 X86ISD::WrapperRIP : X86ISD::Wrapper;
6870 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
6871 // global base reg.
6872 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
6873 !Subtarget->is64Bit();
6874 if (PIC32)
6875 OpFlag = X86II::MO_TLVP_PIC_BASE;
6876 else
6877 OpFlag = X86II::MO_TLVP;
6878 DebugLoc DL = Op.getDebugLoc();
6879 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
6880 GA->getValueType(0),
6881 GA->getOffset(), OpFlag);
6882 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
6884 // With PIC32, the address is actually $g + Offset.
6885 if (PIC32)
6886 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
6887 DAG.getNode(X86ISD::GlobalBaseReg,
6888 DebugLoc(), getPointerTy()),
6889 Offset);
6891 // Lowering the machine isd will make sure everything is in the right
6892 // place.
6893 SDValue Chain = DAG.getEntryNode();
6894 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6895 SDValue Args[] = { Chain, Offset };
6896 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);
6898 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
6899 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6900 MFI->setAdjustsStack(true);
6902 // And our return value (tls address) is in the standard call return value
6903 // location.
6904 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
6905 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
6906 }
6908 assert(false &&
6909 "TLS not implemented for this target.");
6911 llvm_unreachable("Unreachable");
6916 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values and
6917 /// take a 2 x i32 value to shift plus a shift amount.
6918 SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const {
6919 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6920 EVT VT = Op.getValueType();
6921 unsigned VTBits = VT.getSizeInBits();
6922 DebugLoc dl = Op.getDebugLoc();
6923 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
6924 SDValue ShOpLo = Op.getOperand(0);
6925 SDValue ShOpHi = Op.getOperand(1);
6926 SDValue ShAmt = Op.getOperand(2);
6927 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
6928 DAG.getConstant(VTBits - 1, MVT::i8))
6929 : DAG.getConstant(0, VT);
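// Tmp2 below holds the SHLD/SHRD combined double-shift result and Tmp3 the
// plain single-register shift; the AND/CMP of the amount with VTBits feeds
// the CMOVs, which select the wrapped-around results (Tmp3/Tmp1) when the
// shift amount is >= VTBits.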
6931 SDValue Tmp2, Tmp3;
6932 if (Op.getOpcode() == ISD::SHL_PARTS) {
6933 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
6934 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
6935 } else {
6936 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
6937 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
6938 }
6940 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
6941 DAG.getConstant(VTBits, MVT::i8));
6942 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
6943 AndNode, DAG.getConstant(0, MVT::i8));
6945 SDValue Hi, Lo;
6946 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
6947 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
6948 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
6950 if (Op.getOpcode() == ISD::SHL_PARTS) {
6951 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
6952 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
6953 } else {
6954 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
6955 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
6956 }
6958 SDValue Ops[2] = { Lo, Hi };
6959 return DAG.getMergeValues(Ops, 2, dl);
6960 }
6962 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
6963 SelectionDAG &DAG) const {
6964 EVT SrcVT = Op.getOperand(0).getValueType();
6966 if (SrcVT.isVector())
6967 return SDValue();
6969 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
6970 "Unknown SINT_TO_FP to lower!");
6972 // These are really Legal; return the operand so the caller accepts it as
6973 // Legal.
6974 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
6975 return Op;
6976 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
6977 Subtarget->is64Bit()) {
6978 return Op;
6979 }
6981 DebugLoc dl = Op.getDebugLoc();
6982 unsigned Size = SrcVT.getSizeInBits()/8;
6983 MachineFunction &MF = DAG.getMachineFunction();
6984 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
6985 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
6986 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
6987 StackSlot,
6988 MachinePointerInfo::getFixedStack(SSFI),
6989 false, false, 0);
6990 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
6991 }
6993 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
6994 SDValue StackSlot,
6995 SelectionDAG &DAG) const {
6997 DebugLoc DL = Op.getDebugLoc();
6998 SDVTList Tys;
6999 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
7000 if (useSSE)
7001 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
7002 else
7003 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
7005 unsigned ByteSize = SrcVT.getSizeInBits()/8;
7007 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
7008 MachineMemOperand *MMO;
7009 if (FI) {
7010 int SSFI = FI->getIndex();
7011 MMO =
7012 DAG.getMachineFunction()
7013 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
7014 MachineMemOperand::MOLoad, ByteSize, ByteSize);
7015 } else {
7016 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
7017 StackSlot = StackSlot.getOperand(1);
7018 }
7019 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
7020 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
7022 Tys, Ops, array_lengthof(Ops),
7023 SrcVT, MMO);
7025 if (useSSE) {
7026 Chain = Result.getValue(1);
7027 SDValue InFlag = Result.getValue(2);
7029 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
7030 // shouldn't be necessary except that RFP cannot be live across
7031 // multiple blocks. When stackifier is fixed, they can be uncoupled.
7032 MachineFunction &MF = DAG.getMachineFunction();
7033 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
7034 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
7035 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
7036 Tys = DAG.getVTList(MVT::Other);
7037 SDValue Ops[] = {
7038 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
7039 };
7040 MachineMemOperand *MMO =
7041 DAG.getMachineFunction()
7042 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
7043 MachineMemOperand::MOStore, SSFISize, SSFISize);
7045 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
7046 Ops, array_lengthof(Ops),
7047 Op.getValueType(), MMO);
7048 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
7049 MachinePointerInfo::getFixedStack(SSFI),
7050 false, false, 0);
7051 }
7053 return Result;
7054 }
7056 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
7057 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
7058 SelectionDAG &DAG) const {
7059 // This algorithm is not obvious. Here it is in C code, more or less:
7060 /*
7061 double uint64_to_double( uint32_t hi, uint32_t lo ) {
7062 static const __m128i exp = { 0x4330000045300000ULL, 0 };
7063 static const __m128d bias = { 0x1.0p84, 0x1.0p52 };
7065 // Copy ints to xmm registers.
7066 __m128i xh = _mm_cvtsi32_si128( hi );
7067 __m128i xl = _mm_cvtsi32_si128( lo );
7069 // Combine into low half of a single xmm register.
7070 __m128i x = _mm_unpacklo_epi32( xh, xl );
7074 // Merge in appropriate exponents to give the integer bits the right
7076 x = _mm_unpacklo_epi32( x, exp );
7078 // Subtract away the biases to deal with the IEEE-754 double precision
7080 d = _mm_sub_pd( (__m128d) x, bias );
7082 // All conversions up to here are exact. The correctly rounded result is
7083 // calculated using the current rounding mode using the following
7085 d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) );
7086 _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this
7087 // store doesn't really need to be here (except
7088 // maybe to zero the other double)
7089 return sd;
7090 }
7091 */
7093 DebugLoc dl = Op.getDebugLoc();
7094 LLVMContext *Context = DAG.getContext();
7096 // Build some magic constants.
7097 std::vector<Constant*> CV0;
7098 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000)));
7099 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000)));
7100 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
7101 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
7102 Constant *C0 = ConstantVector::get(CV0);
7103 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
7105 std::vector<Constant*> CV1;
7106 CV1.push_back(
7107 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
7108 CV1.push_back(
7109 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
7110 Constant *C1 = ConstantVector::get(CV1);
7111 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
7113 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
7114 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7115 Op.getOperand(0),
7116 DAG.getIntPtrConstant(1)));
7117 SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
7118 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7119 Op.getOperand(0),
7120 DAG.getIntPtrConstant(0)));
7121 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
7122 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
7123 MachinePointerInfo::getConstantPool(),
7124 false, false, 16);
7125 SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
7126 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
7127 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
7128 MachinePointerInfo::getConstantPool(),
7129 false, false, 16);
7130 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
7132 // Add the halves; easiest way is to swap them into another reg first.
7133 int ShufMask[2] = { 1, -1 };
7134 SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub,
7135 DAG.getUNDEF(MVT::v2f64), ShufMask);
7136 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub);
7137 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add,
7138 DAG.getIntPtrConstant(0));
7139 }
7141 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
7142 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
7143 SelectionDAG &DAG) const {
7144 DebugLoc dl = Op.getDebugLoc();
7145 // FP constant to bias correct the final result.
7146 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
7147 MVT::f64);
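// 0x4330000000000000 is 2^52 as an IEEE-754 double. ORing the zero-extended
// 32-bit value into its low mantissa bits yields exactly 2^52 + x, so
// subtracting the bias afterwards recovers x converted to double.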
7149 // Load the 32-bit value into an XMM register.
7150 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
7151 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7152 Op.getOperand(0),
7153 DAG.getIntPtrConstant(0)));
7155 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
7156 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
7157 DAG.getIntPtrConstant(0));
7159 // Or the load with the bias.
7160 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
7161 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
7162 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7163 MVT::v2f64, Load)),
7164 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
7165 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7166 MVT::v2f64, Bias)));
7167 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
7168 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
7169 DAG.getIntPtrConstant(0));
7171 // Subtract the bias.
7172 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
7174 // Handle final rounding.
7175 EVT DestVT = Op.getValueType();
7177 if (DestVT.bitsLT(MVT::f64)) {
7178 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
7179 DAG.getIntPtrConstant(0));
7180 } else if (DestVT.bitsGT(MVT::f64)) {
7181 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
7182 }
7184 // Handle final rounding.
7185 return Sub;
7186 }
7188 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
7189 SelectionDAG &DAG) const {
7190 SDValue N0 = Op.getOperand(0);
7191 DebugLoc dl = Op.getDebugLoc();
7193 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
7194 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
7195 // the optimization here.
7196 if (DAG.SignBitIsZero(N0))
7197 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
7199 EVT SrcVT = N0.getValueType();
7200 EVT DstVT = Op.getValueType();
7201 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
7202 return LowerUINT_TO_FP_i64(Op, DAG);
7203 else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
7204 return LowerUINT_TO_FP_i32(Op, DAG);
7206 // Make a 64-bit buffer, and use it to build an FILD.
7207 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
7208 if (SrcVT == MVT::i32) {
7209 SDValue WordOff = DAG.getConstant(4, getPointerTy());
7210 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
7211 getPointerTy(), StackSlot, WordOff);
7212 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
7213 StackSlot, MachinePointerInfo(),
7214 false, false, 0);
7215 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
7216 OffsetSlot, MachinePointerInfo(),
7217 false, false, 0);
7218 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
7219 return Fild;
7220 }
7222 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
7223 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
7224 StackSlot, MachinePointerInfo(),
7225 false, false, 0);
7226 // For i64 source, we need to add the appropriate power of 2 if the input
7227 // was negative. This is the same as the optimization in
7228 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
7229 // we must be careful to do the computation in x87 extended precision, not
7230 // in SSE. (The generic code can't know it's OK to do this, or how to.)
7231 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
7232 MachineMemOperand *MMO =
7233 DAG.getMachineFunction()
7234 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
7235 MachineMemOperand::MOLoad, 8, 8);
7237 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
7238 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
7239 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
7240 MVT::i64, MMO);
7242 APInt FF(32, 0x5F800000ULL);
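// 0x5F800000 is 2^64 as an IEEE-754 float; it is added back below when FILD
// has interpreted the i64 input as negative (i.e. as x - 2^64).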
7244 // Check whether the sign bit is set.
7245 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
7246 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
7247 ISD::SETLT);
7249 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
7250 SDValue FudgePtr = DAG.getConstantPool(
7251 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
7252 getPointerTy());
7254 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
7255 SDValue Zero = DAG.getIntPtrConstant(0);
7256 SDValue Four = DAG.getIntPtrConstant(4);
7257 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
7258 Zero, Four);
7259 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
7261 // Load the value out, extending it from f32 to f80.
7262 // FIXME: Avoid the extend by constructing the right constant pool?
7263 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
7264 FudgePtr, MachinePointerInfo::getConstantPool(),
7265 MVT::f32, false, false, 4);
7266 // Extend everything to 80 bits to force it to be done on x87.
7267 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
7268 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
7269 }
7271 std::pair<SDValue,SDValue> X86TargetLowering::
7272 FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
7273 DebugLoc DL = Op.getDebugLoc();
7275 EVT DstTy = Op.getValueType();
7277 if (!IsSigned) {
7278 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
7279 DstTy = MVT::i64;
7280 }
7282 assert(DstTy.getSimpleVT() <= MVT::i64 &&
7283 DstTy.getSimpleVT() >= MVT::i16 &&
7284 "Unknown FP_TO_SINT to lower!");
7286 // These are really Legal.
7287 if (DstTy == MVT::i32 &&
7288 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
7289 return std::make_pair(SDValue(), SDValue());
7290 if (Subtarget->is64Bit() &&
7291 DstTy == MVT::i64 &&
7292 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
7293 return std::make_pair(SDValue(), SDValue());
7295 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
7296 // stack slot.
7297 MachineFunction &MF = DAG.getMachineFunction();
7298 unsigned MemSize = DstTy.getSizeInBits()/8;
7299 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
7300 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
7303 unsigned Opc;
7305 switch (DstTy.getSimpleVT().SimpleTy) {
7306 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
7307 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
7308 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
7309 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
7310 }
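// The FP_TO_INT*_IN_MEM pseudos are expanded by the custom inserter into an
// FNSTCW/FLDCW pair that forces round-toward-zero around the FIST store, so
// the conversion truncates regardless of the current x87 rounding mode.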
7312 SDValue Chain = DAG.getEntryNode();
7313 SDValue Value = Op.getOperand(0);
7314 EVT TheVT = Op.getOperand(0).getValueType();
7315 if (isScalarFPTypeInSSEReg(TheVT)) {
7316 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
7317 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
7318 MachinePointerInfo::getFixedStack(SSFI),
7319 false, false, 0);
7320 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
7321 SDValue Ops[] = {
7322 Chain, StackSlot, DAG.getValueType(TheVT)
7323 };
7325 MachineMemOperand *MMO =
7326 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
7327 MachineMemOperand::MOLoad, MemSize, MemSize);
7328 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3,
7329 DstTy, MMO);
7330 Chain = Value.getValue(1);
7331 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
7332 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
7333 }
7335 MachineMemOperand *MMO =
7336 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
7337 MachineMemOperand::MOStore, MemSize, MemSize);
7339 // Build the FP_TO_INT*_IN_MEM
7340 SDValue Ops[] = { Chain, Value, StackSlot };
7341 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
7342 Ops, 3, DstTy, MMO);
7344 return std::make_pair(FIST, StackSlot);
7345 }
7347 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
7348 SelectionDAG &DAG) const {
7349 if (Op.getValueType().isVector())
7350 return SDValue();
7352 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
7353 SDValue FIST = Vals.first, StackSlot = Vals.second;
7354 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
7355 if (FIST.getNode() == 0) return Op;
7358 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
7359 FIST, StackSlot, MachinePointerInfo(), false, false, 0);
7360 }
7362 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
7363 SelectionDAG &DAG) const {
7364 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false);
7365 SDValue FIST = Vals.first, StackSlot = Vals.second;
7366 assert(FIST.getNode() && "Unexpected failure");
7369 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
7370 FIST, StackSlot, MachinePointerInfo(), false, false, 0);
7371 }
7373 SDValue X86TargetLowering::LowerFABS(SDValue Op,
7374 SelectionDAG &DAG) const {
7375 LLVMContext *Context = DAG.getContext();
7376 DebugLoc dl = Op.getDebugLoc();
7377 EVT VT = Op.getValueType();
7378 EVT EltVT = VT;
7379 if (VT.isVector())
7380 EltVT = VT.getVectorElementType();
7381 std::vector<Constant*> CV;
7382 if (EltVT == MVT::f64) {
7383 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
7384 CV.push_back(C);
7385 CV.push_back(C);
7386 } else {
7387 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))));
7388 CV.push_back(C);
7389 CV.push_back(C);
7390 CV.push_back(C);
7391 CV.push_back(C);
7392 }
7393 Constant *C = ConstantVector::get(CV);
7394 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
7395 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
7396 MachinePointerInfo::getConstantPool(),
7397 false, false, 16);
7398 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
7399 }
7401 SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
7402 LLVMContext *Context = DAG.getContext();
7403 DebugLoc dl = Op.getDebugLoc();
7404 EVT VT = Op.getValueType();
7405 EVT EltVT = VT;
7406 if (VT.isVector())
7407 EltVT = VT.getVectorElementType();
7408 std::vector<Constant*> CV;
7409 if (EltVT == MVT::f64) {
7410 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
7411 CV.push_back(C);
7412 CV.push_back(C);
7413 } else {
7414 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
7415 CV.push_back(C);
7416 CV.push_back(C);
7417 CV.push_back(C);
7418 CV.push_back(C);
7419 }
7420 Constant *C = ConstantVector::get(CV);
7421 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
7422 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
7423 MachinePointerInfo::getConstantPool(),
7424 false, false, 16);
7425 if (VT.isVector()) {
7426 return DAG.getNode(ISD::BITCAST, dl, VT,
7427 DAG.getNode(ISD::XOR, dl, MVT::v2i64,
7428 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
7429 Op.getOperand(0)),
7430 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask)));
7431 } else {
7432 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
7433 }
7434 }
7436 SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
7437 LLVMContext *Context = DAG.getContext();
7438 SDValue Op0 = Op.getOperand(0);
7439 SDValue Op1 = Op.getOperand(1);
7440 DebugLoc dl = Op.getDebugLoc();
7441 EVT VT = Op.getValueType();
7442 EVT SrcVT = Op1.getValueType();
7444 // If second operand is smaller, extend it first.
7445 if (SrcVT.bitsLT(VT)) {
7446 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
7447 SrcVT = VT;
7448 }
7449 // And if it is bigger, shrink it first.
7450 if (SrcVT.bitsGT(VT)) {
7451 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
7452 SrcVT = VT;
7453 }
7455 // At this point the operands and the result should have the same
7456 // type, and that won't be f80 since that is not custom lowered.
7458 // First get the sign bit of second operand.
7459 std::vector<Constant*> CV;
7460 if (SrcVT == MVT::f64) {
7461 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
7462 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
7463 } else {
7464 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))));
7465 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7466 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7467 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7468 }
7469 Constant *C = ConstantVector::get(CV);
7470 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
7471 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
7472 MachinePointerInfo::getConstantPool(),
7473 false, false, 16);
7474 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
7476 // Shift sign bit right or left if the two operands have different types.
7477 if (SrcVT.bitsGT(VT)) {
7478 // Op0 is MVT::f32, Op1 is MVT::f64.
7479 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
7480 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
7481 DAG.getConstant(32, MVT::i32));
7482 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
7483 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
7484 DAG.getIntPtrConstant(0));
7485 }
7487 // Clear first operand sign bit.
7488 CV.clear();
7489 if (VT == MVT::f64) {
7490 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
7491 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
7492 } else {
7493 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
7494 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7495 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7496 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
7497 }
7498 C = ConstantVector::get(CV);
7499 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
7500 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
7501 MachinePointerInfo::getConstantPool(),
7502 false, false, 16);
7503 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);
7505 // Or the value with the sign bit.
7506 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
7507 }
7509 SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const {
7510 SDValue N0 = Op.getOperand(0);
7511 DebugLoc dl = Op.getDebugLoc();
7512 EVT VT = Op.getValueType();
7514 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
7515 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
7516 DAG.getConstant(1, VT));
7517 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
7518 }
7520 /// Emit nodes that will be selected as "test Op0,Op0", or something
7522 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
7523 SelectionDAG &DAG) const {
7524 DebugLoc dl = Op.getDebugLoc();
7526 // CF and OF aren't always set the way we want. Determine which
7527 // of these we need.
7528 bool NeedCF = false;
7529 bool NeedOF = false;
7530 switch (X86CC) {
7531 default: break;
7532 case X86::COND_A: case X86::COND_AE:
7533 case X86::COND_B: case X86::COND_BE:
7534 NeedCF = true;
7535 break;
7536 case X86::COND_G: case X86::COND_GE:
7537 case X86::COND_L: case X86::COND_LE:
7538 case X86::COND_O: case X86::COND_NO:
7539 NeedOF = true;
7540 break;
7541 }
7543 // See if we can use the EFLAGS value from the operand instead of
7544 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
7545 // we prove that the arithmetic won't overflow, we can't use OF or CF.
7546 if (Op.getResNo() != 0 || NeedOF || NeedCF)
7547 // Emit a CMP with 0, which is the TEST pattern.
7548 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
7549 DAG.getConstant(0, Op.getValueType()));
7551 unsigned Opcode = 0;
7552 unsigned NumOperands = 0;
7553 switch (Op.getNode()->getOpcode()) {
7554 case ISD::ADD:
7555 // Due to an isel shortcoming, be conservative if this add is likely to be
7556 // selected as part of a load-modify-store instruction. When the root node
7557 // in a match is a store, isel doesn't know how to remap non-chain non-flag
7558 // uses of other nodes in the match, such as the ADD in this case. This
7559 // leads to the ADD being left around and reselected, with the result being
7560 // two adds in the output. Alas, even if none of our users are stores, that
7561 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
7562 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
7563 // climbing the DAG back to the root, and it doesn't seem to be worth the
7565 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
7566 UE = Op.getNode()->use_end(); UI != UE; ++UI)
7567 if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
7568 goto default_case;
7570 if (ConstantSDNode *C =
7571 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
7572 // An add of one will be selected as an INC.
7573 if (C->getAPIntValue() == 1) {
7574 Opcode = X86ISD::INC;
7575 NumOperands = 1;
7576 break;
7577 }
7579 // An add of negative one (subtract of one) will be selected as a DEC.
7580 if (C->getAPIntValue().isAllOnesValue()) {
7581 Opcode = X86ISD::DEC;
7582 NumOperands = 1;
7583 break;
7584 }
7585 }
7587 // Otherwise use a regular EFLAGS-setting add.
7588 Opcode = X86ISD::ADD;
7589 NumOperands = 2;
7590 break;
7591 case ISD::AND: {
7592 // If the non-flag result of the 'and' isn't used, don't bother emitting
7593 // X86ISD::AND, because a TEST instruction will be better.
7594 bool NonFlagUse = false;
7595 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
7596 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
7597 SDNode *User = *UI;
7598 unsigned UOpNo = UI.getOperandNo();
7599 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
7600 // Look past the truncate.
7601 UOpNo = User->use_begin().getOperandNo();
7602 User = *User->use_begin();
7603 }
7605 if (User->getOpcode() != ISD::BRCOND &&
7606 User->getOpcode() != ISD::SETCC &&
7607 (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
7608 NonFlagUse = true;
7609 break;
7610 }
7611 }
7613 if (!NonFlagUse)
7614 break;
7615 }
7616 // FALL THROUGH
7617 case ISD::SUB:
7618 case ISD::OR:
7619 case ISD::XOR:
7620 // Due to the ISEL shortcoming noted above, be conservative if this op is
7621 // likely to be selected as part of a load-modify-store instruction.
7622 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
7623 UE = Op.getNode()->use_end(); UI != UE; ++UI)
7624 if (UI->getOpcode() == ISD::STORE)
7625 goto default_case;
7627 // Otherwise use a regular EFLAGS-setting instruction.
7628 switch (Op.getNode()->getOpcode()) {
7629 default: llvm_unreachable("unexpected operator!");
7630 case ISD::SUB: Opcode = X86ISD::SUB; break;
7631 case ISD::OR: Opcode = X86ISD::OR; break;
7632 case ISD::XOR: Opcode = X86ISD::XOR; break;
7633 case ISD::AND: Opcode = X86ISD::AND; break;
7634 }
7636 NumOperands = 2;
7637 break;
7638 case X86ISD::ADD:
7639 case X86ISD::SUB:
7640 case X86ISD::INC:
7641 case X86ISD::DEC:
7642 case X86ISD::OR:
7643 case X86ISD::XOR:
7644 case X86ISD::AND:
7645 return SDValue(Op.getNode(), 1);
7646 default:
7647 default_case:
7648 break;
7649 }
7651 if (Opcode == 0)
7652 // Emit a CMP with 0, which is the TEST pattern.
7653 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
7654 DAG.getConstant(0, Op.getValueType()));
7656 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
7657 SmallVector<SDValue, 4> Ops;
7658 for (unsigned i = 0; i != NumOperands; ++i)
7659 Ops.push_back(Op.getOperand(i));
7661 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
7662 DAG.ReplaceAllUsesWith(Op, New);
7663 return SDValue(New.getNode(), 1);
7664 }
7666 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
7668 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
7669 SelectionDAG &DAG) const {
7670 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
7671 if (C->getAPIntValue() == 0)
7672 return EmitTest(Op0, X86CC, DAG);
7674 DebugLoc dl = Op0.getDebugLoc();
7675 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
7676 }
7678 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
7679 /// if it's possible.
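/// BT copies the tested bit into CF, so (X & (1 << N)) == 0 lowers to
/// BT X, N followed by SETAE, and the != 0 forms lower to SETB.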
7680 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
7681 DebugLoc dl, SelectionDAG &DAG) const {
7682 SDValue Op0 = And.getOperand(0);
7683 SDValue Op1 = And.getOperand(1);
7684 if (Op0.getOpcode() == ISD::TRUNCATE)
7685 Op0 = Op0.getOperand(0);
7686 if (Op1.getOpcode() == ISD::TRUNCATE)
7687 Op1 = Op1.getOperand(0);
7689 SDValue LHS, RHS;
7690 if (Op1.getOpcode() == ISD::SHL)
7691 std::swap(Op0, Op1);
7692 if (Op0.getOpcode() == ISD::SHL) {
7693 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
7694 if (And00C->getZExtValue() == 1) {
7695 // If we looked past a truncate, check that it's only truncating away
7696 // known zeros.
7697 unsigned BitWidth = Op0.getValueSizeInBits();
7698 unsigned AndBitWidth = And.getValueSizeInBits();
7699 if (BitWidth > AndBitWidth) {
7700 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
7701 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
7702 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
7703 return SDValue();
7704 }
7705 LHS = Op1;
7706 RHS = Op0.getOperand(1);
7707 }
7708 } else if (Op1.getOpcode() == ISD::Constant) {
7709 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
7710 SDValue AndLHS = Op0;
7711 if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) {
7712 LHS = AndLHS.getOperand(0);
7713 RHS = AndLHS.getOperand(1);
7714 }
7715 }
7717 if (LHS.getNode()) {
7718 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
7719 // instruction. Since the shift amount is in-range-or-undefined, we know
7720 // that doing a bittest on the i32 value is ok. We extend to i32 because
7721 // the encoding for the i16 version is larger than the i32 version.
7722 // Also promote i16 to i32 for performance / code size reason.
7723 if (LHS.getValueType() == MVT::i8 ||
7724 LHS.getValueType() == MVT::i16)
7725 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
7727 // If the operand types disagree, extend the shift amount to match. Since
7728 // BT ignores high bits (like shifts) we can use anyextend.
7729 if (LHS.getValueType() != RHS.getValueType())
7730 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
7732 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
7733 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
7734 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
7735 DAG.getConstant(Cond, MVT::i8), BT);
7736 }
7738 return SDValue();
7739 }
7741 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
7742 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
7743 SDValue Op0 = Op.getOperand(0);
7744 SDValue Op1 = Op.getOperand(1);
7745 DebugLoc dl = Op.getDebugLoc();
7746 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
7748 // Optimize to BT if possible.
7749 // Lower (X & (1 << N)) == 0 to BT(X, N).
7750 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
7751 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
7752 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
7753 Op1.getOpcode() == ISD::Constant &&
7754 cast<ConstantSDNode>(Op1)->isNullValue() &&
7755 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7756 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
7757 if (NewSetCC.getNode())
7758 return NewSetCC;
7759 }
7761 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
7762 // these.
7763 if (Op1.getOpcode() == ISD::Constant &&
7764 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
7765 cast<ConstantSDNode>(Op1)->isNullValue()) &&
7766 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7768 // If the input is a setcc, then reuse the input setcc or use a new one with
7769 // the inverted condition.
7770 if (Op0.getOpcode() == X86ISD::SETCC) {
7771 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
7772 bool Invert = (CC == ISD::SETNE) ^
7773 cast<ConstantSDNode>(Op1)->isNullValue();
7774 if (!Invert) return Op0;
7776 CCode = X86::GetOppositeBranchCondition(CCode);
7777 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
7778 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
7779 }
7780 }
7782 bool isFP = Op1.getValueType().isFloatingPoint();
7783 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
7784 if (X86CC == X86::COND_INVALID)
7785 return SDValue();
7787 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
7788 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
7789 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
7790 }
7792 SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
7794 SDValue Op0 = Op.getOperand(0);
7795 SDValue Op1 = Op.getOperand(1);
7796 SDValue CC = Op.getOperand(2);
7797 EVT VT = Op.getValueType();
7798 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
7799 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
7800 DebugLoc dl = Op.getDebugLoc();
7802 if (isFP) {
7803 unsigned SSECC = 8;
7804 EVT VT0 = Op0.getValueType();
7805 assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64);
7806 unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
7807 bool Swap = false;
7809 switch (SetCCOpcode) {
7810 default: break;
7811 case ISD::SETOEQ:
7812 case ISD::SETEQ: SSECC = 0; break;
7813 case ISD::SETOGT:
7814 case ISD::SETGT: Swap = true; // Fallthrough
7815 case ISD::SETLT:
7816 case ISD::SETOLT: SSECC = 1; break;
7817 case ISD::SETOGE:
7818 case ISD::SETGE: Swap = true; // Fallthrough
7819 case ISD::SETLE:
7820 case ISD::SETOLE: SSECC = 2; break;
7821 case ISD::SETUO: SSECC = 3; break;
7822 case ISD::SETUNE:
7823 case ISD::SETNE: SSECC = 4; break;
7824 case ISD::SETULE: Swap = true;
7825 case ISD::SETUGE: SSECC = 5; break;
7826 case ISD::SETULT: Swap = true;
7827 case ISD::SETUGT: SSECC = 6; break;
7828 case ISD::SETO: SSECC = 7; break;
7829 }
7830 if (Swap)
7831 std::swap(Op0, Op1);
7833 // In the two special cases we can't handle, emit two comparisons.
7834 if (SSECC == 8) {
7835 if (SetCCOpcode == ISD::SETUEQ) {
7836 SDValue UNORD, EQ;
7837 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
7838 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
7839 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
7840 }
7841 else if (SetCCOpcode == ISD::SETONE) {
7842 SDValue ORD, NEQ;
7843 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
7844 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
7845 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
7846 }
7847 llvm_unreachable("Illegal FP comparison");
7848 }
7849 // Handle all other FP comparisons here.
7850 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
7851 }
7853 // We are handling one of the integer comparisons here. Since SSE only has
7854 // GT and EQ comparisons for integer, swapping operands and multiple
7855 // operations may be required for some comparisons.
7856 unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
7857 bool Swap = false, Invert = false, FlipSigns = false;
7859 switch (VT.getSimpleVT().SimpleTy) {
7860 default: break;
7861 case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
7862 case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
7863 case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
7864 case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
7865 }
7867 switch (SetCCOpcode) {
7868 default: break;
7869 case ISD::SETNE: Invert = true;
7870 case ISD::SETEQ: Opc = EQOpc; break;
7871 case ISD::SETLT: Swap = true;
7872 case ISD::SETGT: Opc = GTOpc; break;
7873 case ISD::SETGE: Swap = true;
7874 case ISD::SETLE: Opc = GTOpc; Invert = true; break;
7875 case ISD::SETULT: Swap = true;
7876 case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
7877 case ISD::SETUGE: Swap = true;
7878 case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
7879 }
7880 if (Swap)
7881 std::swap(Op0, Op1);
7883 // Since SSE has no unsigned integer comparisons, we need to flip the sign
7884 // bits of the inputs before performing those operations.
7885 if (FlipSigns) {
7886 EVT EltVT = VT.getVectorElementType();
7887 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
7888 EltVT);
7889 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
7890 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
7891 SignBits.size());
7892 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
7893 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
7894 }
7896 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
7898 // If the logical-not of the result is required, perform that now.
7899 if (Invert)
7900 Result = DAG.getNOT(dl, Result, VT);
7902 return Result;
7903 }
7905 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
7906 static bool isX86LogicalCmp(SDValue Op) {
7907 unsigned Opc = Op.getNode()->getOpcode();
7908 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
7909 return true;
7910 if (Op.getResNo() == 1 &&
7911 (Opc == X86ISD::ADD ||
7912 Opc == X86ISD::SUB ||
7913 Opc == X86ISD::ADC ||
7914 Opc == X86ISD::SBB ||
7915 Opc == X86ISD::SMUL ||
7916 Opc == X86ISD::UMUL ||
7917 Opc == X86ISD::INC ||
7918 Opc == X86ISD::DEC ||
7919 Opc == X86ISD::OR ||
7920 Opc == X86ISD::XOR ||
7921 Opc == X86ISD::AND))
7922 return true;
7924 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
7925 return true;
7927 return false;
7928 }
7930 static bool isZero(SDValue V) {
7931 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
7932 return C && C->isNullValue();
7933 }
7935 static bool isAllOnes(SDValue V) {
7936 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
7937 return C && C->isAllOnesValue();
7938 }
7940 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7941 bool addTest = true;
7942 SDValue Cond = Op.getOperand(0);
7943 SDValue Op1 = Op.getOperand(1);
7944 SDValue Op2 = Op.getOperand(2);
7945 DebugLoc DL = Op.getDebugLoc();
7946 SDValue CC;
7948 if (Cond.getOpcode() == ISD::SETCC) {
7949 SDValue NewCond = LowerSETCC(Cond, DAG);
7950 if (NewCond.getNode())
7951 Cond = NewCond;
7952 }
7954 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
7955 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
7956 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
7957 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
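// X86ISD::SETCC_CARRY materializes 0 or -1 from the carry flag (it selects to
// an SBB of a register with itself), and CMP x, 1 sets CF exactly when x == 0,
// which is what the transforms below rely on.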
7958 if (Cond.getOpcode() == X86ISD::SETCC &&
7959 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
7960 isZero(Cond.getOperand(1).getOperand(1))) {
7961 SDValue Cmp = Cond.getOperand(1);
7963 unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
7965 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
7966 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
7967 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
7969 SDValue CmpOp0 = Cmp.getOperand(0);
7970 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
7971 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
7973 SDValue Res = // Res = 0 or -1.
7974 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
7975 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
7977 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
7978 Res = DAG.getNOT(DL, Res, Res.getValueType());
7980 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
7981 if (N2C == 0 || !N2C->isNullValue())
7982 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
7983 return Res;
7984 }
7985 }
7987 // Look past (and (setcc_carry (cmp ...)), 1).
7988 if (Cond.getOpcode() == ISD::AND &&
7989 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
7990 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
7991 if (C && C->getAPIntValue() == 1)
7992 Cond = Cond.getOperand(0);
7993 }
7995 // If condition flag is set by a X86ISD::CMP, then use it as the condition
7996 // setting operand in place of the X86ISD::SETCC.
7997 if (Cond.getOpcode() == X86ISD::SETCC ||
7998 Cond.getOpcode() == X86ISD::SETCC_CARRY) {
7999 CC = Cond.getOperand(0);
8001 SDValue Cmp = Cond.getOperand(1);
8002 unsigned Opc = Cmp.getOpcode();
8003 EVT VT = Op.getValueType();
8005 bool IllegalFPCMov = false;
8006 if (VT.isFloatingPoint() && !VT.isVector() &&
8007 !isScalarFPTypeInSSEReg(VT)) // FPStack?
8008 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
8010 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
8011 Opc == X86ISD::BT) { // FIXME
8012 Cond = Cmp;
8013 addTest = false;
8014 }
8015 }
8017 if (addTest) {
8018 // Look past the truncate.
8019 if (Cond.getOpcode() == ISD::TRUNCATE)
8020 Cond = Cond.getOperand(0);
8022 // We know the result of AND is compared against zero. Try to match
8023 // it to BT.
8024 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
8025 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
8026 if (NewSetCC.getNode()) {
8027 CC = NewSetCC.getOperand(0);
8028 Cond = NewSetCC.getOperand(1);
8029 addTest = false;
8030 }
8031 }
8032 }
8034 if (addTest) {
8035 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
8036 Cond = EmitTest(Cond, X86::COND_NE, DAG);
8037 }
8039 // a < b ? -1 : 0 -> RES = ~setcc_carry
8040 // a < b ? 0 : -1 -> RES = setcc_carry
8041 // a >= b ? -1 : 0 -> RES = setcc_carry
8042 // a >= b ? 0 : -1 -> RES = ~setcc_carry
8043 if (Cond.getOpcode() == X86ISD::CMP) {
8044 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
8046 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
8047 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
8048 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
8049 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
8050 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
8051 return DAG.getNOT(DL, Res, Res.getValueType());
8052 return Res;
8053 }
8054 }
8056 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
8057 // condition is true.
8058 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
8059 SDValue Ops[] = { Op2, Op1, CC, Cond };
8060 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
8061 }
8063 // isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
8064 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
8065 // from the AND / OR.
8066 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
8067 Opc = Op.getOpcode();
8068 if (Opc != ISD::OR && Opc != ISD::AND)
8069 return false;
8070 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
8071 Op.getOperand(0).hasOneUse() &&
8072 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
8073 Op.getOperand(1).hasOneUse());
8074 }
8076 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
8077 // 1 and that the SETCC node has a single use.
8078 static bool isXor1OfSetCC(SDValue Op) {
8079 if (Op.getOpcode() != ISD::XOR)
8080 return false;
8081 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8082 if (N1C && N1C->getAPIntValue() == 1) {
8083 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
8084 Op.getOperand(0).hasOneUse();
8085 }
8086 return false;
8087 }
8089 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
8090 bool addTest = true;
8091 SDValue Chain = Op.getOperand(0);
8092 SDValue Cond = Op.getOperand(1);
8093 SDValue Dest = Op.getOperand(2);
8094 DebugLoc dl = Op.getDebugLoc();
8096 SDValue CC;
8097 if (Cond.getOpcode() == ISD::SETCC) {
8098 SDValue NewCond = LowerSETCC(Cond, DAG);
8099 if (NewCond.getNode())
8100 Cond = NewCond;
8101 }
8102 #if 0
8103 // FIXME: LowerXALUO doesn't handle these!!
8104 else if (Cond.getOpcode() == X86ISD::ADD ||
8105 Cond.getOpcode() == X86ISD::SUB ||
8106 Cond.getOpcode() == X86ISD::SMUL ||
8107 Cond.getOpcode() == X86ISD::UMUL)
8108 Cond = LowerXALUO(Cond, DAG);
8109 #endif
8111 // Look past (and (setcc_carry (cmp ...)), 1).
8112 if (Cond.getOpcode() == ISD::AND &&
8113 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
8114 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
8115 if (C && C->getAPIntValue() == 1)
8116 Cond = Cond.getOperand(0);
8117 }
8119 // If condition flag is set by a X86ISD::CMP, then use it as the condition
8120 // setting operand in place of the X86ISD::SETCC.
8121 if (Cond.getOpcode() == X86ISD::SETCC ||
8122 Cond.getOpcode() == X86ISD::SETCC_CARRY) {
8123 CC = Cond.getOperand(0);
8125 SDValue Cmp = Cond.getOperand(1);
8126 unsigned Opc = Cmp.getOpcode();
8127 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
8128 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
8129 Cond = Cmp;
8130 addTest = false;
8131 } else {
8132 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
8133 default: break;
8134 case X86::COND_O:
8135 case X86::COND_B:
8136 // These can only come from an arithmetic instruction with overflow,
8137 // e.g. SADDO, UADDO.
8138 Cond = Cond.getNode()->getOperand(1);
8139 addTest = false;
8140 break;
8141 }
8142 }
8143 } else {
8144 unsigned CondOpc;
8145 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
8146 SDValue Cmp = Cond.getOperand(0).getOperand(1);
8147 if (CondOpc == ISD::OR) {
8148 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
8149 // two branches instead of an explicit OR instruction with a
8150 // separate test.
8151 if (Cmp == Cond.getOperand(1).getOperand(1) &&
8152 isX86LogicalCmp(Cmp)) {
8153 CC = Cond.getOperand(0).getOperand(0);
8154 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
8155 Chain, Dest, CC, Cmp);
8156 CC = Cond.getOperand(1).getOperand(0);
8157 Cond = Cmp;
8158 addTest = false;
8159 }
8160 } else { // ISD::AND
8161 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
8162 // two branches instead of an explicit AND instruction with a
8163 // separate test. However, we only do this if this block doesn't
8164 // have a fall-through edge, because this requires an explicit
8165 // jmp when the condition is false.
8166 if (Cmp == Cond.getOperand(1).getOperand(1) &&
8167 isX86LogicalCmp(Cmp) &&
8168 Op.getNode()->hasOneUse()) {
8169 X86::CondCode CCode =
8170 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
8171 CCode = X86::GetOppositeBranchCondition(CCode);
8172 CC = DAG.getConstant(CCode, MVT::i8);
8173 SDNode *User = *Op.getNode()->use_begin();
8174 // Look for an unconditional branch following this conditional branch.
8175 // We need this because we need to reverse the successors in order
8176 // to implement FCMP_OEQ.
8177 if (User->getOpcode() == ISD::BR) {
8178 SDValue FalseBB = User->getOperand(1);
8179 SDNode *NewBR =
8180 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
8181 assert(NewBR == User);
8182 (void)NewBR;
8183 Dest = FalseBB;
8185 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
8186 Chain, Dest, CC, Cmp);
8187 X86::CondCode CCode =
8188 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
8189 CCode = X86::GetOppositeBranchCondition(CCode);
8190 CC = DAG.getConstant(CCode, MVT::i8);
8191 Cond = Cmp;
8192 addTest = false;
8193 }
8194 }
8195 }
8196 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
8197 // Recognize the xorb (setcc), 1 pattern; the xor inverts the condition.
8198 // It should be transformed during dag combiner except when the condition
8199 // is set by an arithmetic-with-overflow node.
8200 X86::CondCode CCode =
8201 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
8202 CCode = X86::GetOppositeBranchCondition(CCode);
8203 CC = DAG.getConstant(CCode, MVT::i8);
8204 Cond = Cond.getOperand(0).getOperand(1);
8205 addTest = false;
8206 }
8207 }
8209 if (addTest) {
8210 // Look past the truncate.
8211 if (Cond.getOpcode() == ISD::TRUNCATE)
8212 Cond = Cond.getOperand(0);
8214 // We know the result of AND is compared against zero. Try to match
8215 // it to BT.
8216 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
8217 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
8218 if (NewSetCC.getNode()) {
8219 CC = NewSetCC.getOperand(0);
8220 Cond = NewSetCC.getOperand(1);
8221 addTest = false;
8222 }
8223 }
8224 }
8226 if (addTest) {
8227 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
8228 Cond = EmitTest(Cond, X86::COND_NE, DAG);
8229 }
8230 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
8231 Chain, Dest, CC, Cond);
8232 }
8235 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
8236 // Calls to _alloca are needed to probe the stack when allocating more than 4k
8237 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
8238 // that the guard pages used by the OS virtual memory manager are allocated in
8239 // correct sequence.
8240 SDValue
8241 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
8242 SelectionDAG &DAG) const {
8243 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) &&
8244 "This should be used only on Windows targets");
8245 assert(!Subtarget->isTargetEnvMacho());
8246 DebugLoc dl = Op.getDebugLoc();
8249 SDValue Chain = Op.getOperand(0);
8250 SDValue Size = Op.getOperand(1);
8251 // FIXME: Ensure alignment here
8253 SDValue Flag;
8255 EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
8256 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX);
8258 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
8259 Flag = Chain.getValue(1);
8261 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8263 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
8264 Flag = Chain.getValue(1);
8266 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);
8268 SDValue Ops1[2] = { Chain.getValue(0), Chain };
8269 return DAG.getMergeValues(Ops1, 2, dl);
8272 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
8273 MachineFunction &MF = DAG.getMachineFunction();
8274 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
8276 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8277 DebugLoc DL = Op.getDebugLoc();
8279 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
8280 // vastart just stores the address of the VarArgsFrameIndex slot into the
8281 // memory location argument.
8282 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
8284 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8285 MachinePointerInfo(SV), false, false, 0);
8289 // gp_offset (0 - 6 * 8)
8290 // fp_offset (48 - 48 + 8 * 16)
8291 // overflow_arg_area (point to parameters coming in memory).
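// For reference, the AMD64 ABI va_list element layout assumed by the stores
// below (byte offsets match the MachinePointerInfo offsets used later):
//   struct __va_list_tag {
//     unsigned gp_offset;          // offset 0
//     unsigned fp_offset;          // offset 4
//     void    *overflow_arg_area;  // offset 8
//     void    *reg_save_area;      // offset 16
//   };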
8293 SmallVector<SDValue, 8> MemOps;
8294 SDValue FIN = Op.getOperand(1);
8296 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
8297 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
8299 FIN, MachinePointerInfo(SV), false, false, 0);
8300 MemOps.push_back(Store);
8303 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
8304 FIN, DAG.getIntPtrConstant(4));
8305 Store = DAG.getStore(Op.getOperand(0), DL,
8306 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
8308 FIN, MachinePointerInfo(SV, 4), false, false, 0);
8309 MemOps.push_back(Store);
8311 // Store ptr to overflow_arg_area
8312 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
8313 FIN, DAG.getIntPtrConstant(4));
8314 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
8316 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
8317 MachinePointerInfo(SV, 8),
8319 MemOps.push_back(Store);
8321 // Store ptr to reg_save_area.
8322 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
8323 FIN, DAG.getIntPtrConstant(8));
8324 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
8326 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
8327 MachinePointerInfo(SV, 16), false, false, 0);
8328 MemOps.push_back(Store);
8329 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
8330 &MemOps[0], MemOps.size());
8333 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
8334 assert(Subtarget->is64Bit() &&
8335 "LowerVAARG only handles 64-bit va_arg!");
8336 assert((Subtarget->isTargetLinux() ||
8337 Subtarget->isTargetDarwin()) &&
8338 "Unhandled target in LowerVAARG");
8339 assert(Op.getNode()->getNumOperands() == 4);
8340 SDValue Chain = Op.getOperand(0);
8341 SDValue SrcPtr = Op.getOperand(1);
8342 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8343 unsigned Align = Op.getConstantOperandVal(3);
8344 DebugLoc dl = Op.getDebugLoc();
8346 EVT ArgVT = Op.getNode()->getValueType(0);
8347 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
8348 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
8351 // Decide which area this value should be read from.
8352 // TODO: Implement the AMD64 ABI in its entirety. This simple
8353 // selection mechanism works only for the basic types.
8354 if (ArgVT == MVT::f80) {
8355 llvm_unreachable("va_arg for f80 not yet implemented");
8356 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
8357 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
8358 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
8359 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
8361 llvm_unreachable("Unhandled argument type in LowerVAARG");
8365 // Sanity Check: Make sure using fp_offset makes sense.
8366 assert(!UseSoftFloat &&
8367 !(DAG.getMachineFunction()
8368 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) &&
8369 Subtarget->hasXMM());
8372 // Insert VAARG_64 node into the DAG
8373 // VAARG_64 returns two values: Variable Argument Address, Chain
8374 SmallVector<SDValue, 11> InstOps;
8375 InstOps.push_back(Chain);
8376 InstOps.push_back(SrcPtr);
8377 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
8378 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
8379 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
8380 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
8381 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
8382 VTs, &InstOps[0], InstOps.size(),
8384 MachinePointerInfo(SV),
8389 Chain = VAARG.getValue(1);
8391 // Load the next argument and return it
8392 return DAG.getLoad(ArgVT, dl,
8395 MachinePointerInfo(),
8399 SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
8400 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
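// That struct is 24 bytes (4 + 4 + 8 + 8), which is why the memcpy below
// copies a fixed 24 bytes with 8-byte alignment.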
8401 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
8402 SDValue Chain = Op.getOperand(0);
8403 SDValue DstPtr = Op.getOperand(1);
8404 SDValue SrcPtr = Op.getOperand(2);
8405 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
8406 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8407 DebugLoc DL = Op.getDebugLoc();
8409 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
8410 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
8412 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
8416 X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
8417 DebugLoc dl = Op.getDebugLoc();
8418 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8420 default: return SDValue(); // Don't custom lower most intrinsics.
8421 // Comparison intrinsics.
8422 case Intrinsic::x86_sse_comieq_ss:
8423 case Intrinsic::x86_sse_comilt_ss:
8424 case Intrinsic::x86_sse_comile_ss:
8425 case Intrinsic::x86_sse_comigt_ss:
8426 case Intrinsic::x86_sse_comige_ss:
8427 case Intrinsic::x86_sse_comineq_ss:
8428 case Intrinsic::x86_sse_ucomieq_ss:
8429 case Intrinsic::x86_sse_ucomilt_ss:
8430 case Intrinsic::x86_sse_ucomile_ss:
8431 case Intrinsic::x86_sse_ucomigt_ss:
8432 case Intrinsic::x86_sse_ucomige_ss:
8433 case Intrinsic::x86_sse_ucomineq_ss:
8434 case Intrinsic::x86_sse2_comieq_sd:
8435 case Intrinsic::x86_sse2_comilt_sd:
8436 case Intrinsic::x86_sse2_comile_sd:
8437 case Intrinsic::x86_sse2_comigt_sd:
8438 case Intrinsic::x86_sse2_comige_sd:
8439 case Intrinsic::x86_sse2_comineq_sd:
8440 case Intrinsic::x86_sse2_ucomieq_sd:
8441 case Intrinsic::x86_sse2_ucomilt_sd:
8442 case Intrinsic::x86_sse2_ucomile_sd:
8443 case Intrinsic::x86_sse2_ucomigt_sd:
8444 case Intrinsic::x86_sse2_ucomige_sd:
8445 case Intrinsic::x86_sse2_ucomineq_sd: {
8447 ISD::CondCode CC = ISD::SETCC_INVALID;
8450 case Intrinsic::x86_sse_comieq_ss:
8451 case Intrinsic::x86_sse2_comieq_sd:
8455 case Intrinsic::x86_sse_comilt_ss:
8456 case Intrinsic::x86_sse2_comilt_sd:
8460 case Intrinsic::x86_sse_comile_ss:
8461 case Intrinsic::x86_sse2_comile_sd:
8465 case Intrinsic::x86_sse_comigt_ss:
8466 case Intrinsic::x86_sse2_comigt_sd:
8470 case Intrinsic::x86_sse_comige_ss:
8471 case Intrinsic::x86_sse2_comige_sd:
8475 case Intrinsic::x86_sse_comineq_ss:
8476 case Intrinsic::x86_sse2_comineq_sd:
8480 case Intrinsic::x86_sse_ucomieq_ss:
8481 case Intrinsic::x86_sse2_ucomieq_sd:
8482 Opc = X86ISD::UCOMI;
8485 case Intrinsic::x86_sse_ucomilt_ss:
8486 case Intrinsic::x86_sse2_ucomilt_sd:
8487 Opc = X86ISD::UCOMI;
8490 case Intrinsic::x86_sse_ucomile_ss:
8491 case Intrinsic::x86_sse2_ucomile_sd:
8492 Opc = X86ISD::UCOMI;
8495 case Intrinsic::x86_sse_ucomigt_ss:
8496 case Intrinsic::x86_sse2_ucomigt_sd:
8497 Opc = X86ISD::UCOMI;
8500 case Intrinsic::x86_sse_ucomige_ss:
8501 case Intrinsic::x86_sse2_ucomige_sd:
8502 Opc = X86ISD::UCOMI;
8505 case Intrinsic::x86_sse_ucomineq_ss:
8506 case Intrinsic::x86_sse2_ucomineq_sd:
8507 Opc = X86ISD::UCOMI;
8512 SDValue LHS = Op.getOperand(1);
8513 SDValue RHS = Op.getOperand(2);
8514 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
8515 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
8516 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS);
8517 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
8518 DAG.getConstant(X86CC, MVT::i8), Cond);
8519 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
8521 // ptest and testp intrinsics. The intrinsics these come from are designed to
8522 // return an integer value rather than just set flags, so lower them to the
8523 // ptest or testp pattern plus a setcc for the result.
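// Roughly: the *testz* forms check ZF (the AND of the two operands is all
// zeros), the *testc* forms check CF (the AND of one operand with the
// complement of the other is all zeros), and the *testnzc* forms require
// both flags clear; hence COND_E, COND_B and COND_A below.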
8524 case Intrinsic::x86_sse41_ptestz:
8525 case Intrinsic::x86_sse41_ptestc:
8526 case Intrinsic::x86_sse41_ptestnzc:
8527 case Intrinsic::x86_avx_ptestz_256:
8528 case Intrinsic::x86_avx_ptestc_256:
8529 case Intrinsic::x86_avx_ptestnzc_256:
8530 case Intrinsic::x86_avx_vtestz_ps:
8531 case Intrinsic::x86_avx_vtestc_ps:
8532 case Intrinsic::x86_avx_vtestnzc_ps:
8533 case Intrinsic::x86_avx_vtestz_pd:
8534 case Intrinsic::x86_avx_vtestc_pd:
8535 case Intrinsic::x86_avx_vtestnzc_pd:
8536 case Intrinsic::x86_avx_vtestz_ps_256:
8537 case Intrinsic::x86_avx_vtestc_ps_256:
8538 case Intrinsic::x86_avx_vtestnzc_ps_256:
8539 case Intrinsic::x86_avx_vtestz_pd_256:
8540 case Intrinsic::x86_avx_vtestc_pd_256:
8541 case Intrinsic::x86_avx_vtestnzc_pd_256: {
8542 bool IsTestPacked = false;
8545 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
8546 case Intrinsic::x86_avx_vtestz_ps:
8547 case Intrinsic::x86_avx_vtestz_pd:
8548 case Intrinsic::x86_avx_vtestz_ps_256:
8549 case Intrinsic::x86_avx_vtestz_pd_256:
8550 IsTestPacked = true; // Fallthrough
8551 case Intrinsic::x86_sse41_ptestz:
8552 case Intrinsic::x86_avx_ptestz_256:
8554 X86CC = X86::COND_E;
8556 case Intrinsic::x86_avx_vtestc_ps:
8557 case Intrinsic::x86_avx_vtestc_pd:
8558 case Intrinsic::x86_avx_vtestc_ps_256:
8559 case Intrinsic::x86_avx_vtestc_pd_256:
8560 IsTestPacked = true; // Fallthrough
8561 case Intrinsic::x86_sse41_ptestc:
8562 case Intrinsic::x86_avx_ptestc_256:
8564 X86CC = X86::COND_B;
8566 case Intrinsic::x86_avx_vtestnzc_ps:
8567 case Intrinsic::x86_avx_vtestnzc_pd:
8568 case Intrinsic::x86_avx_vtestnzc_ps_256:
8569 case Intrinsic::x86_avx_vtestnzc_pd_256:
8570 IsTestPacked = true; // Fallthrough
8571 case Intrinsic::x86_sse41_ptestnzc:
8572 case Intrinsic::x86_avx_ptestnzc_256:
8574 X86CC = X86::COND_A;
8578 SDValue LHS = Op.getOperand(1);
8579 SDValue RHS = Op.getOperand(2);
8580 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
8581 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
8582 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
8583 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
8584 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
8587 // Fix vector shift instructions where the last operand is a non-immediate i32 value.
8589 case Intrinsic::x86_sse2_pslli_w:
8590 case Intrinsic::x86_sse2_pslli_d:
8591 case Intrinsic::x86_sse2_pslli_q:
8592 case Intrinsic::x86_sse2_psrli_w:
8593 case Intrinsic::x86_sse2_psrli_d:
8594 case Intrinsic::x86_sse2_psrli_q:
8595 case Intrinsic::x86_sse2_psrai_w:
8596 case Intrinsic::x86_sse2_psrai_d:
8597 case Intrinsic::x86_mmx_pslli_w:
8598 case Intrinsic::x86_mmx_pslli_d:
8599 case Intrinsic::x86_mmx_pslli_q:
8600 case Intrinsic::x86_mmx_psrli_w:
8601 case Intrinsic::x86_mmx_psrli_d:
8602 case Intrinsic::x86_mmx_psrli_q:
8603 case Intrinsic::x86_mmx_psrai_w:
8604 case Intrinsic::x86_mmx_psrai_d: {
8605 SDValue ShAmt = Op.getOperand(2);
8606 if (isa<ConstantSDNode>(ShAmt))
8609 unsigned NewIntNo = 0;
8610 EVT ShAmtVT = MVT::v4i32;
8612 case Intrinsic::x86_sse2_pslli_w:
8613 NewIntNo = Intrinsic::x86_sse2_psll_w;
8615 case Intrinsic::x86_sse2_pslli_d:
8616 NewIntNo = Intrinsic::x86_sse2_psll_d;
8618 case Intrinsic::x86_sse2_pslli_q:
8619 NewIntNo = Intrinsic::x86_sse2_psll_q;
8621 case Intrinsic::x86_sse2_psrli_w:
8622 NewIntNo = Intrinsic::x86_sse2_psrl_w;
8624 case Intrinsic::x86_sse2_psrli_d:
8625 NewIntNo = Intrinsic::x86_sse2_psrl_d;
8627 case Intrinsic::x86_sse2_psrli_q:
8628 NewIntNo = Intrinsic::x86_sse2_psrl_q;
8630 case Intrinsic::x86_sse2_psrai_w:
8631 NewIntNo = Intrinsic::x86_sse2_psra_w;
8633 case Intrinsic::x86_sse2_psrai_d:
8634 NewIntNo = Intrinsic::x86_sse2_psra_d;
8637 ShAmtVT = MVT::v2i32;
8639 case Intrinsic::x86_mmx_pslli_w:
8640 NewIntNo = Intrinsic::x86_mmx_psll_w;
8642 case Intrinsic::x86_mmx_pslli_d:
8643 NewIntNo = Intrinsic::x86_mmx_psll_d;
8645 case Intrinsic::x86_mmx_pslli_q:
8646 NewIntNo = Intrinsic::x86_mmx_psll_q;
8648 case Intrinsic::x86_mmx_psrli_w:
8649 NewIntNo = Intrinsic::x86_mmx_psrl_w;
8651 case Intrinsic::x86_mmx_psrli_d:
8652 NewIntNo = Intrinsic::x86_mmx_psrl_d;
8654 case Intrinsic::x86_mmx_psrli_q:
8655 NewIntNo = Intrinsic::x86_mmx_psrl_q;
8657 case Intrinsic::x86_mmx_psrai_w:
8658 NewIntNo = Intrinsic::x86_mmx_psra_w;
8660 case Intrinsic::x86_mmx_psrai_d:
8661 NewIntNo = Intrinsic::x86_mmx_psra_d;
8663 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
8669 // The vector shift intrinsics with scalars use 32-bit shift amounts but
8670 // the sse2/mmx shift instructions read 64 bits. Set the upper 32 bits to zero.
8674 ShOps[1] = DAG.getConstant(0, MVT::i32);
8675 if (ShAmtVT == MVT::v4i32) {
8676 ShOps[2] = DAG.getUNDEF(MVT::i32);
8677 ShOps[3] = DAG.getUNDEF(MVT::i32);
8678 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
8680 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
8681 // FIXME this must be lowered to get rid of the invalid type.
8684 EVT VT = Op.getValueType();
8685 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
8686 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
8687 DAG.getConstant(NewIntNo, MVT::i32),
8688 Op.getOperand(1), ShAmt);
8693 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
8694 SelectionDAG &DAG) const {
8695 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
8696 MFI->setReturnAddressIsTaken(true);
8698 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8699 DebugLoc dl = Op.getDebugLoc();
8702 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
8704 DAG.getConstant(TD->getPointerSize(),
8705 Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
8706 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
8707 DAG.getNode(ISD::ADD, dl, getPointerTy(),
8709 MachinePointerInfo(), false, false, 0);
8712 // Just load the return address.
8713 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
8714 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
8715 RetAddrFI, MachinePointerInfo(), false, false, 0);
8718 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
8719 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
8720 MFI->setFrameAddressIsTaken(true);
8722 EVT VT = Op.getValueType();
8723 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
8724 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8725 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
8726 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
8728 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
8729 MachinePointerInfo(),
8734 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
8735 SelectionDAG &DAG) const {
8736 return DAG.getIntPtrConstant(2*TD->getPointerSize());
8739 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
8740 MachineFunction &MF = DAG.getMachineFunction();
8741 SDValue Chain = Op.getOperand(0);
8742 SDValue Offset = Op.getOperand(1);
8743 SDValue Handler = Op.getOperand(2);
8744 DebugLoc dl = Op.getDebugLoc();
8746 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
8747 Subtarget->is64Bit() ? X86::RBP : X86::EBP,
8749 unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
8751 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
8752 DAG.getIntPtrConstant(TD->getPointerSize()));
8753 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
8754 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
8756 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
8757 MF.getRegInfo().addLiveOut(StoreAddrReg);
8759 return DAG.getNode(X86ISD::EH_RETURN, dl,
8761 Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
8764 SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
8765 SelectionDAG &DAG) const {
8766 SDValue Root = Op.getOperand(0);
8767 SDValue Trmp = Op.getOperand(1); // trampoline
8768 SDValue FPtr = Op.getOperand(2); // nested function
8769 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
8770 DebugLoc dl = Op.getDebugLoc();
8772 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8774 if (Subtarget->is64Bit()) {
8775 SDValue OutChains[6];
8777 // Large code-model.
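// The trampoline code written below is laid out roughly as
//   offset  0: 49 BB <imm64>   movabsq $FPtr, %r11
//   offset 10: 49 BA <imm64>   movabsq $Nest, %r10
//   offset 20: 49 FF E3        jmpq    *%r11
// where the offsets match the constants used in the stores that follow.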
8778 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
8779 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
8781 const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10);
8782 const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11);
8784 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
8786 // Load the pointer to the nested function into R11.
8787 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
8788 SDValue Addr = Trmp;
8789 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
8790 Addr, MachinePointerInfo(TrmpAddr),
8793 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
8794 DAG.getConstant(2, MVT::i64));
8795 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
8796 MachinePointerInfo(TrmpAddr, 2),
8799 // Load the 'nest' parameter value into R10.
8800 // R10 is specified in X86CallingConv.td
8801 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
8802 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
8803 DAG.getConstant(10, MVT::i64));
8804 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
8805 Addr, MachinePointerInfo(TrmpAddr, 10),
8808 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
8809 DAG.getConstant(12, MVT::i64));
8810 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
8811 MachinePointerInfo(TrmpAddr, 12),
8814 // Jump to the nested function.
8815 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
8816 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
8817 DAG.getConstant(20, MVT::i64));
8818 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
8819 Addr, MachinePointerInfo(TrmpAddr, 20),
8822 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
8823 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
8824 DAG.getConstant(22, MVT::i64));
8825 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
8826 MachinePointerInfo(TrmpAddr, 22),
8830 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) };
8831 return DAG.getMergeValues(Ops, 2, dl);
8833 const Function *Func =
8834 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
8835 CallingConv::ID CC = Func->getCallingConv();
8840 llvm_unreachable("Unsupported calling convention");
8841 case CallingConv::C:
8842 case CallingConv::X86_StdCall: {
8843 // Pass 'nest' parameter in ECX.
8844 // Must be kept in sync with X86CallingConv.td
8847 // Check that ECX wasn't needed by an 'inreg' parameter.
8848 FunctionType *FTy = Func->getFunctionType();
8849 const AttrListPtr &Attrs = Func->getAttributes();
8851 if (!Attrs.isEmpty() && !Func->isVarArg()) {
8852 unsigned InRegCount = 0;
8855 for (FunctionType::param_iterator I = FTy->param_begin(),
8856 E = FTy->param_end(); I != E; ++I, ++Idx)
8857 if (Attrs.paramHasAttr(Idx, Attribute::InReg))
8858 // FIXME: should only count parameters that are lowered to integers.
8859 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
8861 if (InRegCount > 2) {
8862 report_fatal_error("Nest register in use - reduce number of inreg"
8868 case CallingConv::X86_FastCall:
8869 case CallingConv::X86_ThisCall:
8870 case CallingConv::Fast:
8871 // Pass 'nest' parameter in EAX.
8872 // Must be kept in sync with X86CallingConv.td
8877 SDValue OutChains[4];
8880 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8881 DAG.getConstant(10, MVT::i32));
8882 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
8884 // This is storing the opcode for MOV32ri.
8885 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
8886 const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg);
8887 OutChains[0] = DAG.getStore(Root, dl,
8888 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
8889 Trmp, MachinePointerInfo(TrmpAddr),
8892 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8893 DAG.getConstant(1, MVT::i32));
8894 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
8895 MachinePointerInfo(TrmpAddr, 1),
8898 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
8899 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8900 DAG.getConstant(5, MVT::i32));
8901 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
8902 MachinePointerInfo(TrmpAddr, 5),
8905 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
8906 DAG.getConstant(6, MVT::i32));
8907 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
8908 MachinePointerInfo(TrmpAddr, 6),
8912 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) };
8913 return DAG.getMergeValues(Ops, 2, dl);
8917 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8918 SelectionDAG &DAG) const {
8920 The rounding mode is in bits 11:10 of FPSR, and has the following
8927 FLT_ROUNDS, on the other hand, expects the following:
8934 To perform the conversion, we do:
8935 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
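  For example, with the rounding-control bits 11:10 set to 11 (round toward
  zero), FPSR & 0x800 and FPSR & 0x400 are both non-zero, the swapped two-bit
  value is 3, and ((3 + 1) & 3) == 0, which is the FLT_ROUNDS encoding for
  round-toward-zero.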
8938 MachineFunction &MF = DAG.getMachineFunction();
8939 const TargetMachine &TM = MF.getTarget();
8940 const TargetFrameLowering &TFI = *TM.getFrameLowering();
8941 unsigned StackAlignment = TFI.getStackAlignment();
8942 EVT VT = Op.getValueType();
8943 DebugLoc DL = Op.getDebugLoc();
8945 // Save FP Control Word to stack slot
8946 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
8947 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
8950 MachineMemOperand *MMO =
8951 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
8952 MachineMemOperand::MOStore, 2, 2);
8954 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
8955 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
8956 DAG.getVTList(MVT::Other),
8957 Ops, 2, MVT::i16, MMO);
8959 // Load FP Control Word from stack slot
8960 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
8961 MachinePointerInfo(), false, false, 0);
8963 // Transform as necessary
8965 DAG.getNode(ISD::SRL, DL, MVT::i16,
8966 DAG.getNode(ISD::AND, DL, MVT::i16,
8967 CWD, DAG.getConstant(0x800, MVT::i16)),
8968 DAG.getConstant(11, MVT::i8));
8970 DAG.getNode(ISD::SRL, DL, MVT::i16,
8971 DAG.getNode(ISD::AND, DL, MVT::i16,
8972 CWD, DAG.getConstant(0x400, MVT::i16)),
8973 DAG.getConstant(9, MVT::i8));
8976 DAG.getNode(ISD::AND, DL, MVT::i16,
8977 DAG.getNode(ISD::ADD, DL, MVT::i16,
8978 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
8979 DAG.getConstant(1, MVT::i16)),
8980 DAG.getConstant(3, MVT::i16));
8983 return DAG.getNode((VT.getSizeInBits() < 16 ?
8984 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
8987 SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
8988 EVT VT = Op.getValueType();
8990 unsigned NumBits = VT.getSizeInBits();
8991 DebugLoc dl = Op.getDebugLoc();
8993 Op = Op.getOperand(0);
8994 if (VT == MVT::i8) {
8995 // Zero extend to i32 since there is not an i8 bsr.
8997 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
9000 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
9001 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
9002 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
9004 // If src is zero (i.e. bsr sets ZF), returns NumBits.
9007 DAG.getConstant(NumBits+NumBits-1, OpVT),
9008 DAG.getConstant(X86::COND_E, MVT::i8),
9011 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));
9013 // Finally xor with NumBits-1.
9014 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
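// Worked example for i32: bsr returns the index of the highest set bit, so
// for a non-zero input ctlz = 31 - bsr = bsr ^ 31. For a zero input the
// CMOV above substitutes 63 (2*32-1), and 63 ^ 31 = 32, the defined result.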
9017 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
9021 SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
9022 EVT VT = Op.getValueType();
9024 unsigned NumBits = VT.getSizeInBits();
9025 DebugLoc dl = Op.getDebugLoc();
9027 Op = Op.getOperand(0);
9028 if (VT == MVT::i8) {
9030 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
9033 // Issue a bsf (scan bits forward) which also sets EFLAGS.
9034 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
9035 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
9037 // If src is zero (i.e. bsf sets ZF), returns NumBits.
9040 DAG.getConstant(NumBits, OpVT),
9041 DAG.getConstant(X86::COND_E, MVT::i8),
9044 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));
9047 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
9051 SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const {
9052 EVT VT = Op.getValueType();
9053 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
9054 DebugLoc dl = Op.getDebugLoc();
9056 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
9057 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32);
9058 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b );
9059 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi );
9060 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b );
9062 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 );
9063 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 );
9064 // return AloBlo + AloBhi + AhiBlo;
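// This works because, writing a = Ahi*2^32 + Alo and b = Bhi*2^32 + Blo,
//   a * b = Alo*Blo + (Alo*Bhi + Ahi*Blo) * 2^32   (mod 2^64)
// since the Ahi*Bhi*2^64 term wraps away, and pmuludq computes the full
// 64-bit product of the low 32-bit halves.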
9066 SDValue A = Op.getOperand(0);
9067 SDValue B = Op.getOperand(1);
9069 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9070 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
9071 A, DAG.getConstant(32, MVT::i32));
9072 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9073 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
9074 B, DAG.getConstant(32, MVT::i32));
9075 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9076 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
9078 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9079 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
9081 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9082 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
9084 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9085 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
9086 AloBhi, DAG.getConstant(32, MVT::i32));
9087 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9088 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
9089 AhiBlo, DAG.getConstant(32, MVT::i32));
9090 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
9091 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
9095 SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
9097 EVT VT = Op.getValueType();
9098 DebugLoc dl = Op.getDebugLoc();
9099 SDValue R = Op.getOperand(0);
9100 SDValue Amt = Op.getOperand(1);
9102 LLVMContext *Context = DAG.getContext();
9105 if (!Subtarget->hasSSE2()) return SDValue();
9107 // Optimize shl/srl/sra with constant shift amount.
9108 if (isSplatVector(Amt.getNode())) {
9109 SDValue SclrAmt = Amt->getOperand(0);
9110 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
9111 uint64_t ShiftAmt = C->getZExtValue();
9113 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SHL)
9114 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9115 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
9116 R, DAG.getConstant(ShiftAmt, MVT::i32));
9118 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SHL)
9119 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9120 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
9121 R, DAG.getConstant(ShiftAmt, MVT::i32));
9123 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SHL)
9124 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9125 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
9126 R, DAG.getConstant(ShiftAmt, MVT::i32));
9128 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SRL)
9129 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9130 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
9131 R, DAG.getConstant(ShiftAmt, MVT::i32));
9133 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRL)
9134 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9135 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
9136 R, DAG.getConstant(ShiftAmt, MVT::i32));
9138 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRL)
9139 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9140 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
9141 R, DAG.getConstant(ShiftAmt, MVT::i32));
9143 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRA)
9144 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9145 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
9146 R, DAG.getConstant(ShiftAmt, MVT::i32));
9148 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRA)
9149 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9150 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
9151 R, DAG.getConstant(ShiftAmt, MVT::i32));
9155 // Lower SHL with variable shift amount.
9156 // Cannot lower SHL without SSE2 or later.
9157 if (!Subtarget->hasSSE2()) return SDValue();
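// v4i32 variable shl trick: form 2^amt per lane by shifting the amount into
// the float exponent field (amt << 23, then add the bits of 1.0f), convert
// the resulting float back to an integer, and multiply R by it.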
9159 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
9160 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9161 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
9162 Op.getOperand(1), DAG.getConstant(23, MVT::i32));
9164 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));
9166 std::vector<Constant*> CV(4, CI);
9167 Constant *C = ConstantVector::get(CV);
9168 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
9169 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
9170 MachinePointerInfo::getConstantPool(),
9173 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
9174 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
9175 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
9176 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
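// v16i8 variable shl: there is no byte-granularity shift instruction, so the
// sequence below moves the shift-amount bits into the sign bit of each byte
// and uses pblendvb (which selects on the sign bit) to conditionally shift R
// left by 4, then 2, then 1, consuming one amount bit per step.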
9178 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
9180 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9181 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
9182 Op.getOperand(1), DAG.getConstant(5, MVT::i32));
9184 ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15));
9185 ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63));
9187 std::vector<Constant*> CVM1(16, CM1);
9188 std::vector<Constant*> CVM2(16, CM2);
9189 Constant *C = ConstantVector::get(CVM1);
9190 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
9191 SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
9192 MachinePointerInfo::getConstantPool(),
9195 // r = pblendv(r, psllw(r & (char16)15, 4), a);
9196 M = DAG.getNode(ISD::AND, dl, VT, R, M);
9197 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9198 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
9199 DAG.getConstant(4, MVT::i32));
9200 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op);
9202 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
9204 C = ConstantVector::get(CVM2);
9205 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
9206 M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
9207 MachinePointerInfo::getConstantPool(),
9210 // r = pblendv(r, psllw(r & (char16)63, 2), a);
9211 M = DAG.getNode(ISD::AND, dl, VT, R, M);
9212 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9213 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
9214 DAG.getConstant(2, MVT::i32));
9215 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op);
9217 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
9219 // return pblendv(r, r+r, a);
9220 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT,
9221 R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op);
9227 SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
9228 // Lower the "add/sub/mul with overflow" instruction into a regular instruction
9229 // plus a "setcc" instruction that checks the overflow flag. The "brcond"
9230 // lowering looks for this combo and may remove the "setcc" instruction if the
9231 // "setcc" has only one use.
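// For example, an IR call such as
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes an X86ISD::ADD that produces the sum plus EFLAGS, followed by an
// X86ISD::SETCC on COND_O that materializes the i1 overflow bit.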
9232 SDNode *N = Op.getNode();
9233 SDValue LHS = N->getOperand(0);
9234 SDValue RHS = N->getOperand(1);
9235 unsigned BaseOp = 0;
9237 DebugLoc DL = Op.getDebugLoc();
9238 switch (Op.getOpcode()) {
9239 default: llvm_unreachable("Unknown ovf instruction!");
9241 // An add of one will be selected as an INC. Note that INC doesn't
9242 // set CF, so we can't do this for UADDO.
9243 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
9245 BaseOp = X86ISD::INC;
9249 BaseOp = X86ISD::ADD;
9253 BaseOp = X86ISD::ADD;
9257 // A subtract of one will be selected as a DEC. Note that DEC doesn't
9258 // set CF, so we can't do this for USUBO.
9259 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
9261 BaseOp = X86ISD::DEC;
9265 BaseOp = X86ISD::SUB;
9269 BaseOp = X86ISD::SUB;
9273 BaseOp = X86ISD::SMUL;
9276 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
9277 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
9279 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
9282 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
9283 DAG.getConstant(X86::COND_O, MVT::i32),
9284 SDValue(Sum.getNode(), 2));
9286 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
9290 // Also sets EFLAGS.
9291 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
9292 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
9295 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
9296 DAG.getConstant(Cond, MVT::i32),
9297 SDValue(Sum.getNode(), 1));
9299 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
9302 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const{
9303 DebugLoc dl = Op.getDebugLoc();
9304 SDNode* Node = Op.getNode();
9305 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
9306 EVT VT = Node->getValueType(0);
9308 if (Subtarget->hasSSE2() && VT.isVector()) {
9309 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
9310 ExtraVT.getScalarType().getSizeInBits();
9311 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32);
9313 unsigned SHLIntrinsicsID = 0;
9314 unsigned SRAIntrinsicsID = 0;
9315 switch (VT.getSimpleVT().SimpleTy) {
9319 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_q;
9320 SRAIntrinsicsID = 0;
9324 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_d;
9325 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_d;
9329 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_w;
9330 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_w;
9335 SDValue Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9336 DAG.getConstant(SHLIntrinsicsID, MVT::i32),
9337 Node->getOperand(0), ShAmt);
9339 // In the 1-bit sext case there is no need to shift right.
9340 if (ExtraVT.getScalarType().getSizeInBits() == 1) return Tmp1;
9342 if (SRAIntrinsicsID) {
9343 Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
9344 DAG.getConstant(SRAIntrinsicsID, MVT::i32),
9354 SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
9355 DebugLoc dl = Op.getDebugLoc();
9357 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2.
9358 // There isn't any reason to disable it if the target processor supports it.
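// When mfence is not available (32-bit target without SSE2), a locked no-op
// read-modify-write such as "lock orl $0, (%esp)" acts as a full barrier;
// that is what the OR32mrLocked machine node below produces.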
9359 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) {
9360 SDValue Chain = Op.getOperand(0);
9361 SDValue Zero = DAG.getConstant(0, MVT::i32);
9363 DAG.getRegister(X86::ESP, MVT::i32), // Base
9364 DAG.getTargetConstant(1, MVT::i8), // Scale
9365 DAG.getRegister(0, MVT::i32), // Index
9366 DAG.getTargetConstant(0, MVT::i32), // Disp
9367 DAG.getRegister(0, MVT::i32), // Segment.
9372 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
9373 array_lengthof(Ops));
9374 return SDValue(Res, 0);
9377 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
9379 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
9381 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
9382 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
9383 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
9384 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
9386 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
9387 if (!Op1 && !Op2 && !Op3 && Op4)
9388 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));
9390 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
9391 if (Op1 && !Op2 && !Op3 && !Op4)
9392 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));
9394 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
9396 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
9399 SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op,
9400 SelectionDAG &DAG) const {
9401 DebugLoc dl = Op.getDebugLoc();
9402 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
9403 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
9404 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
9405 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
9407 // The only fence that needs an instruction is a sequentially-consistent
9408 // cross-thread fence.
9409 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
9410 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
9411 // no-sse2). There isn't any reason to disable it if the target processor supports it.
9413 if (Subtarget->hasSSE2() || Subtarget->is64Bit())
9414 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
9416 SDValue Chain = Op.getOperand(0);
9417 SDValue Zero = DAG.getConstant(0, MVT::i32);
9419 DAG.getRegister(X86::ESP, MVT::i32), // Base
9420 DAG.getTargetConstant(1, MVT::i8), // Scale
9421 DAG.getRegister(0, MVT::i32), // Index
9422 DAG.getTargetConstant(0, MVT::i32), // Disp
9423 DAG.getRegister(0, MVT::i32), // Segment.
9428 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
9429 array_lengthof(Ops));
9430 return SDValue(Res, 0);
9433 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
9434 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
9438 SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
9439 EVT T = Op.getValueType();
9440 DebugLoc DL = Op.getDebugLoc();
9443 switch(T.getSimpleVT().SimpleTy) {
9445 assert(false && "Invalid value type!");
9446 case MVT::i8: Reg = X86::AL; size = 1; break;
9447 case MVT::i16: Reg = X86::AX; size = 2; break;
9448 case MVT::i32: Reg = X86::EAX; size = 4; break;
9450 assert(Subtarget->is64Bit() && "Node not type legal!");
9451 Reg = X86::RAX; size = 8;
9454 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
9455 Op.getOperand(2), SDValue());
9456 SDValue Ops[] = { cpIn.getValue(0),
9459 DAG.getTargetConstant(size, MVT::i8),
9461 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
9462 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
9463 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
9466 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
9470 SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
9471 SelectionDAG &DAG) const {
9472 assert(Subtarget->is64Bit() && "Result not type legalized?");
9473 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
9474 SDValue TheChain = Op.getOperand(0);
9475 DebugLoc dl = Op.getDebugLoc();
9476 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
9477 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
9478 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
9480 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
9481 DAG.getConstant(32, MVT::i8));
9483 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
9486 return DAG.getMergeValues(Ops, 2, dl);
9489 SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
9490 SelectionDAG &DAG) const {
9491 EVT SrcVT = Op.getOperand(0).getValueType();
9492 EVT DstVT = Op.getValueType();
9493 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
9494 Subtarget->hasMMX() && "Unexpected custom BITCAST");
9495 assert((DstVT == MVT::i64 ||
9496 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
9497 "Unexpected custom BITCAST");
9498 // i64 <=> MMX conversions are Legal.
9499 if (SrcVT==MVT::i64 && DstVT.isVector())
9501 if (DstVT==MVT::i64 && SrcVT.isVector())
9503 // MMX <=> MMX conversions are Legal.
9504 if (SrcVT.isVector() && DstVT.isVector())
9506 // All other conversions need to be expanded.
9510 SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
9511 SDNode *Node = Op.getNode();
9512 DebugLoc dl = Node->getDebugLoc();
9513 EVT T = Node->getValueType(0);
9514 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
9515 DAG.getConstant(0, T), Node->getOperand(2));
9516 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
9517 cast<AtomicSDNode>(Node)->getMemoryVT(),
9518 Node->getOperand(0),
9519 Node->getOperand(1), negOp,
9520 cast<AtomicSDNode>(Node)->getSrcValue(),
9521 cast<AtomicSDNode>(Node)->getAlignment());
9524 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
9525 EVT VT = Op.getNode()->getValueType(0);
9527 // Let legalize expand this if it isn't a legal type yet.
9528 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9531 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
9534 bool ExtraOp = false;
9535 switch (Op.getOpcode()) {
9536 default: assert(0 && "Invalid code");
9537 case ISD::ADDC: Opc = X86ISD::ADD; break;
9538 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
9539 case ISD::SUBC: Opc = X86ISD::SUB; break;
9540 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
9544 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
9546 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
9547 Op.getOperand(1), Op.getOperand(2));
9550 /// LowerOperation - Provide custom lowering hooks for some operations.
9552 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9553 switch (Op.getOpcode()) {
9554 default: llvm_unreachable("Should not custom lower this!");
9555 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
9556 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG);
9557 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG);
9558 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
9559 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
9560 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
9561 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
9562 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
9563 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
9564 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
9565 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
9566 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
9567 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
9568 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
9569 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
9570 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
9571 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
9572 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
9573 case ISD::SHL_PARTS:
9574 case ISD::SRA_PARTS:
9575 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
9576 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
9577 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
9578 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
9579 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
9580 case ISD::FABS: return LowerFABS(Op, DAG);
9581 case ISD::FNEG: return LowerFNEG(Op, DAG);
9582 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
9583 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
9584 case ISD::SETCC: return LowerSETCC(Op, DAG);
9585 case ISD::VSETCC: return LowerVSETCC(Op, DAG);
9586 case ISD::SELECT: return LowerSELECT(Op, DAG);
9587 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
9588 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
9589 case ISD::VASTART: return LowerVASTART(Op, DAG);
9590 case ISD::VAARG: return LowerVAARG(Op, DAG);
9591 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
9592 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
9593 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
9594 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
9595 case ISD::FRAME_TO_ARGS_OFFSET:
9596 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
9597 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
9598 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
9599 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
9600 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
9601 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
9602 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
9603 case ISD::MUL: return LowerMUL_V2I64(Op, DAG);
9606 case ISD::SHL: return LowerShift(Op, DAG);
9612 case ISD::UMULO: return LowerXALUO(Op, DAG);
9613 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
9614 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
9618 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
9622 void X86TargetLowering::
9623 ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
9624 SelectionDAG &DAG, unsigned NewOp) const {
9625 EVT T = Node->getValueType(0);
9626 DebugLoc dl = Node->getDebugLoc();
9627 assert (T == MVT::i64 && "Only know how to expand i64 atomics");
9629 SDValue Chain = Node->getOperand(0);
9630 SDValue In1 = Node->getOperand(1);
9631 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9632 Node->getOperand(2), DAG.getIntPtrConstant(0));
9633 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9634 Node->getOperand(2), DAG.getIntPtrConstant(1));
9635 SDValue Ops[] = { Chain, In1, In2L, In2H };
9636 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
9638 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64,
9639 cast<MemSDNode>(Node)->getMemOperand());
9640 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
9641 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
9642 Results.push_back(Result.getValue(2));
9645 /// ReplaceNodeResults - Replace a node with an illegal result type
9646 /// with a new node built out of custom code.
9647 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
9648 SmallVectorImpl<SDValue>&Results,
9649 SelectionDAG &DAG) const {
9650 DebugLoc dl = N->getDebugLoc();
9651 switch (N->getOpcode()) {
9653 assert(false && "Do not know how to custom type legalize this operation!");
9655 case ISD::SIGN_EXTEND_INREG:
9660 // We don't want to expand or promote these.
9662 case ISD::FP_TO_SINT: {
9663 std::pair<SDValue,SDValue> Vals =
9664 FP_TO_INTHelper(SDValue(N, 0), DAG, true);
9665 SDValue FIST = Vals.first, StackSlot = Vals.second;
9666 if (FIST.getNode() != 0) {
9667 EVT VT = N->getValueType(0);
9668 // Return a load from the stack slot.
9669 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
9670 MachinePointerInfo(), false, false, 0));
9674 case ISD::READCYCLECOUNTER: {
9675 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
9676 SDValue TheChain = N->getOperand(0);
9677 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
9678 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
9680 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
9682 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
9683 SDValue Ops[] = { eax, edx };
9684 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2));
9685 Results.push_back(edx.getValue(1));
9688 case ISD::ATOMIC_CMP_SWAP: {
9689 EVT T = N->getValueType(0);
9690 assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
9691 SDValue cpInL, cpInH;
9692 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
9693 DAG.getConstant(0, MVT::i32));
9694 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
9695 DAG.getConstant(1, MVT::i32));
9696 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue());
9697 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH,
9699 SDValue swapInL, swapInH;
9700 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
9701 DAG.getConstant(0, MVT::i32));
9702 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
9703 DAG.getConstant(1, MVT::i32));
9704 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL,
9706 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH,
9707 swapInL.getValue(1));
9708 SDValue Ops[] = { swapInH.getValue(0),
9710 swapInH.getValue(1) };
9711 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
9712 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
9713 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys,
9715 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX,
9716 MVT::i32, Result.getValue(1));
9717 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX,
9718 MVT::i32, cpOutL.getValue(2));
9719 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
9720 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
9721 Results.push_back(cpOutH.getValue(1));
9724 case ISD::ATOMIC_LOAD_ADD:
9725 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
9727 case ISD::ATOMIC_LOAD_AND:
9728 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
9730 case ISD::ATOMIC_LOAD_NAND:
9731 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
9733 case ISD::ATOMIC_LOAD_OR:
9734 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
9736 case ISD::ATOMIC_LOAD_SUB:
9737 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
9739 case ISD::ATOMIC_LOAD_XOR:
9740 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
9742 case ISD::ATOMIC_SWAP:
9743 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
9748 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
9750 default: return NULL;
9751 case X86ISD::BSF: return "X86ISD::BSF";
9752 case X86ISD::BSR: return "X86ISD::BSR";
9753 case X86ISD::SHLD: return "X86ISD::SHLD";
9754 case X86ISD::SHRD: return "X86ISD::SHRD";
9755 case X86ISD::FAND: return "X86ISD::FAND";
9756 case X86ISD::FOR: return "X86ISD::FOR";
9757 case X86ISD::FXOR: return "X86ISD::FXOR";
9758 case X86ISD::FSRL: return "X86ISD::FSRL";
9759 case X86ISD::FILD: return "X86ISD::FILD";
9760 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
9761 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
9762 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
9763 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
9764 case X86ISD::FLD: return "X86ISD::FLD";
9765 case X86ISD::FST: return "X86ISD::FST";
9766 case X86ISD::CALL: return "X86ISD::CALL";
9767 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
9768 case X86ISD::BT: return "X86ISD::BT";
9769 case X86ISD::CMP: return "X86ISD::CMP";
9770 case X86ISD::COMI: return "X86ISD::COMI";
9771 case X86ISD::UCOMI: return "X86ISD::UCOMI";
9772 case X86ISD::SETCC: return "X86ISD::SETCC";
9773 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
9774 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd";
9775 case X86ISD::FSETCCss: return "X86ISD::FSETCCss";
9776 case X86ISD::CMOV: return "X86ISD::CMOV";
9777 case X86ISD::BRCOND: return "X86ISD::BRCOND";
9778 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
9779 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
9780 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
9781 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
9782 case X86ISD::Wrapper: return "X86ISD::Wrapper";
9783 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
9784 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
9785 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
9786 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
9787 case X86ISD::PINSRB: return "X86ISD::PINSRB";
9788 case X86ISD::PINSRW: return "X86ISD::PINSRW";
9789 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
9790 case X86ISD::ANDNP: return "X86ISD::ANDNP";
9791 case X86ISD::PSIGNB: return "X86ISD::PSIGNB";
9792 case X86ISD::PSIGNW: return "X86ISD::PSIGNW";
9793 case X86ISD::PSIGND: return "X86ISD::PSIGND";
9794 case X86ISD::PBLENDVB: return "X86ISD::PBLENDVB";
9795 case X86ISD::FMAX: return "X86ISD::FMAX";
9796 case X86ISD::FMIN: return "X86ISD::FMIN";
9797 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
9798 case X86ISD::FRCP: return "X86ISD::FRCP";
9799 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
9800 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
9801 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
9802 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
9803 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
9804 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
9805 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
9806 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
9807 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG";
9808 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG";
9809 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG";
9810 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
9811 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
9812 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
9813 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
9814 case X86ISD::VSHL: return "X86ISD::VSHL";
9815 case X86ISD::VSRL: return "X86ISD::VSRL";
9816 case X86ISD::CMPPD: return "X86ISD::CMPPD";
9817 case X86ISD::CMPPS: return "X86ISD::CMPPS";
9818 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB";
9819 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW";
9820 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD";
9821 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ";
9822 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB";
9823 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW";
9824 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD";
9825 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
9826 case X86ISD::ADD: return "X86ISD::ADD";
9827 case X86ISD::SUB: return "X86ISD::SUB";
9828 case X86ISD::ADC: return "X86ISD::ADC";
9829 case X86ISD::SBB: return "X86ISD::SBB";
9830 case X86ISD::SMUL: return "X86ISD::SMUL";
9831 case X86ISD::UMUL: return "X86ISD::UMUL";
9832 case X86ISD::INC: return "X86ISD::INC";
9833 case X86ISD::DEC: return "X86ISD::DEC";
9834 case X86ISD::OR: return "X86ISD::OR";
9835 case X86ISD::XOR: return "X86ISD::XOR";
9836 case X86ISD::AND: return "X86ISD::AND";
9837 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
9838 case X86ISD::PTEST: return "X86ISD::PTEST";
9839 case X86ISD::TESTP: return "X86ISD::TESTP";
9840 case X86ISD::PALIGN: return "X86ISD::PALIGN";
9841 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
9842 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
9843 case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD";
9844 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
9845 case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD";
9846 case X86ISD::SHUFPS: return "X86ISD::SHUFPS";
9847 case X86ISD::SHUFPD: return "X86ISD::SHUFPD";
9848 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
9849 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
9850 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
9851 case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD";
9852 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
9853 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
9854 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
9855 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
9856 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
9857 case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD";
9858 case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD";
9859 case X86ISD::MOVSD: return "X86ISD::MOVSD";
9860 case X86ISD::MOVSS: return "X86ISD::MOVSS";
9861 case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS";
9862 case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD";
9863 case X86ISD::VUNPCKLPDY: return "X86ISD::VUNPCKLPDY";
9864 case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS";
9865 case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD";
9866 case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW";
9867 case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD";
9868 case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ";
9869 case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ";
9870 case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW";
9871 case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD";
9872 case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ";
9873 case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ";
9874 case X86ISD::VPERMILPS: return "X86ISD::VPERMILPS";
9875 case X86ISD::VPERMILPSY: return "X86ISD::VPERMILPSY";
9876 case X86ISD::VPERMILPD: return "X86ISD::VPERMILPD";
9877 case X86ISD::VPERMILPDY: return "X86ISD::VPERMILPDY";
9878 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
9879 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
9880 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
9881 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
9885 // isLegalAddressingMode - Return true if the addressing mode represented
9886 // by AM is legal for this target, for a load/store of the specified type.
9887 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
9889 // X86 supports extremely general addressing modes.
9890 CodeModel::Model M = getTargetMachine().getCodeModel();
9891 Reloc::Model R = getTargetMachine().getRelocationModel();
9893 // X86 allows a sign-extended 32-bit immediate field as a displacement.
9894 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
9899 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
9901 // If a reference to this global requires an extra load, we can't fold it.
9902 if (isGlobalStubReference(GVFlags))
9905 // If BaseGV requires a register for the PIC base, we cannot also have a
9906 // BaseReg specified.
9907 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
9910 // If lower 4G is not available, then we must use rip-relative addressing.
9911 if ((M != CodeModel::Small || R != Reloc::Static) &&
9912 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
9922 // These scales always work.
9927 // These scales are formed with basereg+scalereg. Only accept if there is a basereg.
9932 default: // Other stuff never works.
9940 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
9941 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
9943 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
9944 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
9945 if (NumBits1 <= NumBits2)
9950 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
9951 if (!VT1.isInteger() || !VT2.isInteger())
9953 unsigned NumBits1 = VT1.getSizeInBits();
9954 unsigned NumBits2 = VT2.getSizeInBits();
9955 if (NumBits1 <= NumBits2)
9960 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
9961 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
9962 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
9965 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
9966 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
9967 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
9970 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
9971 // i16 instructions are longer (0x66 prefix) and potentially slower.
9972 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
9975 /// isShuffleMaskLegal - Targets can use this to indicate that they only
9976 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
9977 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
9978 /// are assumed to be legal.
9980 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
9982 // Very little shuffling can be done for 64-bit vectors right now.
9983 if (VT.getSizeInBits() == 64)
9984 return isPALIGNRMask(M, VT, Subtarget->hasSSSE3());
9986 // FIXME: pshufb, blends, shifts.
9987 return (VT.getVectorNumElements() == 2 ||
9988 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
9989 isMOVLMask(M, VT) ||
9990 isSHUFPMask(M, VT) ||
9991 isPSHUFDMask(M, VT) ||
9992 isPSHUFHWMask(M, VT) ||
9993 isPSHUFLWMask(M, VT) ||
9994 isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ||
9995 isUNPCKLMask(M, VT) ||
9996 isUNPCKHMask(M, VT) ||
9997 isUNPCKL_v_undef_Mask(M, VT) ||
9998 isUNPCKH_v_undef_Mask(M, VT));
10002 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
10004 unsigned NumElts = VT.getVectorNumElements();
10005 // FIXME: This collection of masks seems suspect.
10008 if (NumElts == 4 && VT.getSizeInBits() == 128) {
10009 return (isMOVLMask(Mask, VT) ||
10010 isCommutedMOVLMask(Mask, VT, true) ||
10011 isSHUFPMask(Mask, VT) ||
10012 isCommutedSHUFPMask(Mask, VT));
10017 //===----------------------------------------------------------------------===//
10018 // X86 Scheduler Hooks
10019 //===----------------------------------------------------------------------===//
10021 // private utility function
10022 MachineBasicBlock *
10023 X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
10024 MachineBasicBlock *MBB,
10031 TargetRegisterClass *RC,
10032 bool invSrc) const {
10033 // For the atomic bitwise operator, we generate
10036 // ld t1 = [bitinstr.addr]
10037 // op t2 = t1, [bitinstr.val]
10039 // lcs dest = [bitinstr.addr], t2 [EAX is implicit]
10041 // fallthrough -->nextMBB
10042 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10043 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
10044 MachineFunction::iterator MBBIter = MBB;
10047 /// First build the CFG
10048 MachineFunction *F = MBB->getParent();
10049 MachineBasicBlock *thisMBB = MBB;
10050 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
10051 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
10052 F->insert(MBBIter, newMBB);
10053 F->insert(MBBIter, nextMBB);
10055 // Transfer the remainder of thisMBB and its successor edges to nextMBB.
10056 nextMBB->splice(nextMBB->begin(), thisMBB,
10057 llvm::next(MachineBasicBlock::iterator(bInstr)),
10059 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
10061 // Update thisMBB to fall through to newMBB
10062 thisMBB->addSuccessor(newMBB);
10064 // newMBB jumps to itself and falls through to nextMBB
10065 newMBB->addSuccessor(nextMBB);
10066 newMBB->addSuccessor(newMBB);
10068 // Insert instructions into newMBB based on incoming instruction
10069 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
10070 "unexpected number of operands");
10071 DebugLoc dl = bInstr->getDebugLoc();
10072 MachineOperand& destOper = bInstr->getOperand(0);
10073 MachineOperand* argOpers[2 + X86::AddrNumOperands];
10074 int numArgs = bInstr->getNumOperands() - 1;
10075 for (int i=0; i < numArgs; ++i)
10076 argOpers[i] = &bInstr->getOperand(i+1);
10078 // x86 address has 5 operands: base, scale, index, displacement, and segment
10079 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
10080 int valArgIndx = lastAddrIndx + 1;
10082 unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
10083 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
10084 for (int i=0; i <= lastAddrIndx; ++i)
10085 (*MIB).addOperand(*argOpers[i]);
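// If invSrc is set (the NAND pseudos pass true), complement the loaded value
// with notOpc before feeding it to the bitwise operation below.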
10087 unsigned tt = F->getRegInfo().createVirtualRegister(RC);
10089 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1);
10094 unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
10095 assert((argOpers[valArgIndx]->isReg() ||
10096 argOpers[valArgIndx]->isImm()) &&
10097 "invalid operand");
10098 if (argOpers[valArgIndx]->isReg())
10099 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
10101 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
10103 (*MIB).addOperand(*argOpers[valArgIndx]);
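// The compare-exchange below compares against EAX (or AX/AL/RAX for the other
// widths), so seed it with the value that was loaded from memory.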
10105 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
10108 MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
10109 for (int i=0; i <= lastAddrIndx; ++i)
10110 (*MIB).addOperand(*argOpers[i]);
10112 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
10113 (*MIB).setMemRefs(bInstr->memoperands_begin(),
10114 bInstr->memoperands_end());
10116 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
10117 MIB.addReg(EAXreg);
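// If the compare-exchange failed, another thread updated the location; loop and retry.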
10120 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
10122 bInstr->eraseFromParent(); // The pseudo instruction is gone now.
10126 // private utility function: 64-bit atomics on a 32-bit host.
10127 MachineBasicBlock *
10128 X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
10129 MachineBasicBlock *MBB,
10134 bool invSrc) const {
10135 // For the atomic bitwise operator, we generate
10136 // thisMBB (instructions are in pairs, except cmpxchg8b)
10137 // ld t1,t2 = [bitinstr.addr]
10139 // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
10140 // op t5, t6 <- out1, out2, [bitinstr.val]
10141 // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val])
10142 // mov ECX, EBX <- t5, t6
10143 // mov EAX, EDX <- t1, t2
10144 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit]
10145 // mov t3, t4 <- EAX, EDX
10147 // result in out1, out2
10148 // fallthrough -->nextMBB
10150 const TargetRegisterClass *RC = X86::GR32RegisterClass;
10151 const unsigned LoadOpc = X86::MOV32rm;
10152 const unsigned NotOpc = X86::NOT32r;
10153 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10154 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
10155 MachineFunction::iterator MBBIter = MBB;
10158 /// First build the CFG
10159 MachineFunction *F = MBB->getParent();
10160 MachineBasicBlock *thisMBB = MBB;
10161 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
10162 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
10163 F->insert(MBBIter, newMBB);
10164 F->insert(MBBIter, nextMBB);
10166 // Transfer the remainder of thisMBB and its successor edges to nextMBB.
10167 nextMBB->splice(nextMBB->begin(), thisMBB,
10168 llvm::next(MachineBasicBlock::iterator(bInstr)),
10170 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
10172 // Update thisMBB to fall through to newMBB
10173 thisMBB->addSuccessor(newMBB);
10175 // newMBB jumps to itself and falls through to nextMBB
10176 newMBB->addSuccessor(nextMBB);
10177 newMBB->addSuccessor(newMBB);
10179 DebugLoc dl = bInstr->getDebugLoc();
10180 // Insert instructions into newMBB based on incoming instruction
10181 // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
10182 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
10183 "unexpected number of operands");
10184 MachineOperand& dest1Oper = bInstr->getOperand(0);
10185 MachineOperand& dest2Oper = bInstr->getOperand(1);
10186 MachineOperand* argOpers[2 + X86::AddrNumOperands];
10187 for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
10188 argOpers[i] = &bInstr->getOperand(i+2);
10190 // We use some of the operands multiple times, so conservatively just
10191 // clear any kill flags that might be present.
10192 if (argOpers[i]->isReg() && argOpers[i]->isUse())
10193 argOpers[i]->setIsKill(false);
10196 // x86 address has 5 operands: base, scale, index, displacement, and segment.
10197 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
10199 unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
10200 MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
10201 for (int i=0; i <= lastAddrIndx; ++i)
10202 (*MIB).addOperand(*argOpers[i]);
10203 unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
10204 MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
10205 // add 4 to displacement.
10206 for (int i=0; i <= lastAddrIndx-2; ++i)
10207 (*MIB).addOperand(*argOpers[i]);
10208 MachineOperand newOp3 = *(argOpers[3]);
10209 if (newOp3.isImm())
10210 newOp3.setImm(newOp3.getImm()+4);
10212 newOp3.setOffset(newOp3.getOffset()+4);
10213 (*MIB).addOperand(newOp3);
10214 (*MIB).addOperand(*argOpers[lastAddrIndx]);
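// t1/t2 now hold the low and high 32-bit halves of the 64-bit value in memory.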
10216 // t3/4 are defined later, at the bottom of the loop
10217 unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
10218 unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
10219 BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
10220 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
10221 BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
10222 .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);
10224 // The subsequent operations should use the destination registers of
10225 // the PHI instructions.
10227 t1 = F->getRegInfo().createVirtualRegister(RC);
10228 t2 = F->getRegInfo().createVirtualRegister(RC);
10229 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg());
10230 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg());
10232 t1 = dest1Oper.getReg();
10233 t2 = dest2Oper.getReg();
10236 int valArgIndx = lastAddrIndx + 1;
10237 assert((argOpers[valArgIndx]->isReg() ||
10238 argOpers[valArgIndx]->isImm()) &&
10239 "invalid operand");
10240 unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
10241 unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
10242 if (argOpers[valArgIndx]->isReg())
10243 MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
10245 MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
10246 if (regOpcL != X86::MOV32rr)
10248 (*MIB).addOperand(*argOpers[valArgIndx]);
10249 assert(argOpers[valArgIndx + 1]->isReg() ==
10250 argOpers[valArgIndx]->isReg());
10251 assert(argOpers[valArgIndx + 1]->isImm() ==
10252 argOpers[valArgIndx]->isImm());
10253 if (argOpers[valArgIndx + 1]->isReg())
10254 MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
10256 MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
10257 if (regOpcH != X86::MOV32rr)
10259 (*MIB).addOperand(*argOpers[valArgIndx + 1]);
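// CMPXCHG8B compares EDX:EAX against memory and, on success, stores ECX:EBX,
// so marshal the expected value and the new value into those register pairs.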
10261 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
10263 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
10266 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
10268 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
10271 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
10272 for (int i=0; i <= lastAddrIndx; ++i)
10273 (*MIB).addOperand(*argOpers[i]);
10275 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
10276 (*MIB).setMemRefs(bInstr->memoperands_begin(),
10277 bInstr->memoperands_end());
10279 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
10280 MIB.addReg(X86::EAX);
10281 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
10282 MIB.addReg(X86::EDX);
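// Retry if another thread modified the 64-bit location between the load and the cmpxchg8b.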
10285 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
10287 bInstr->eraseFromParent(); // The pseudo instruction is gone now.
10291 // private utility function
10292 MachineBasicBlock *
10293 X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
10294 MachineBasicBlock *MBB,
10295 unsigned cmovOpc) const {
10296 // For the atomic min/max operator, we generate
10299 // ld t1 = [min/max.addr]
10300 // mov t2 = [min/max.val]
10302 // cmov[cond] t2 = t1
10304 // lcs dest = [bitinstr.addr], t2 [EAX is implicit]
10306 // fallthrough -->nextMBB
10308 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10309 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
10310 MachineFunction::iterator MBBIter = MBB;
10313 /// First build the CFG
10314 MachineFunction *F = MBB->getParent();
10315 MachineBasicBlock *thisMBB = MBB;
10316 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
10317 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
10318 F->insert(MBBIter, newMBB);
10319 F->insert(MBBIter, nextMBB);
10321 // Transfer the remainder of thisMBB and its successor edges to nextMBB.
10322 nextMBB->splice(nextMBB->begin(), thisMBB,
10323 llvm::next(MachineBasicBlock::iterator(mInstr)),
10325 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
10327 // Update thisMBB to fall through to newMBB
10328 thisMBB->addSuccessor(newMBB);
10330 // newMBB jumps to itself and falls through to nextMBB
10331 newMBB->addSuccessor(nextMBB);
10332 newMBB->addSuccessor(newMBB);
10334 DebugLoc dl = mInstr->getDebugLoc();
10335 // Insert instructions into newMBB based on incoming instruction
10336 assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
10337 "unexpected number of operands");
10338 MachineOperand& destOper = mInstr->getOperand(0);
10339 MachineOperand* argOpers[2 + X86::AddrNumOperands];
10340 int numArgs = mInstr->getNumOperands() - 1;
10341 for (int i=0; i < numArgs; ++i)
10342 argOpers[i] = &mInstr->getOperand(i+1);
10344 // x86 address has 5 operands: base, scale, index, displacement, and segment
10345 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
10346 int valArgIndx = lastAddrIndx + 1;
10348 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
10349 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
10350 for (int i=0; i <= lastAddrIndx; ++i)
10351 (*MIB).addOperand(*argOpers[i]);
10353 // We only support register and immediate values
10354 assert((argOpers[valArgIndx]->isReg() ||
10355 argOpers[valArgIndx]->isImm()) &&
10356 "invalid operand");
10358 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
10359 if (argOpers[valArgIndx]->isReg())
10360 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
10362 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
10363 (*MIB).addOperand(*argOpers[valArgIndx]);
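// LCMPXCHG32 compares against EAX, so place the loaded value there before the compare and cmov.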
10365 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
10368 MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
10373 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
10374 MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3);
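// The conditional move picks either the loaded value or the operand, yielding
// the min/max that the cmpxchg below will try to store back.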
10378 // Compare and exchange if no one has modified the memory location
10379 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
10380 for (int i=0; i <= lastAddrIndx; ++i)
10381 (*MIB).addOperand(*argOpers[i]);
10383 assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
10384 (*MIB).setMemRefs(mInstr->memoperands_begin(),
10385 mInstr->memoperands_end());
10387 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
10388 MIB.addReg(X86::EAX);
10391 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
10393 mInstr->eraseFromParent(); // The pseudo instruction is gone now.
10397 // FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
10398 // or XMM0_V32I8 in AVX, all of this code can be replaced with that
10399 // in the .td file.
10400 MachineBasicBlock *
10401 X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
10402 unsigned numArgs, bool memArg) const {
10403 assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
10404 "Target must have SSE4.2 or AVX features enabled");
10406 DebugLoc dl = MI->getDebugLoc();
10407 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10409 if (!Subtarget->hasAVX()) {
10411 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
10413 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
10416 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
10418 Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
10421 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
10422 for (unsigned i = 0; i < numArgs; ++i) {
10423 MachineOperand &Op = MI->getOperand(i+1);
10424 if (!(Op.isReg() && Op.isImplicit()))
10425 MIB.addOperand(Op);
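// The PCMP*STRM instructions produce their result in XMM0; copy it into the
// pseudo's destination register.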
10427 BuildMI(*BB, MI, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
10428 .addReg(X86::XMM0);
10430 MI->eraseFromParent();
10434 MachineBasicBlock *
10435 X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
10436 DebugLoc dl = MI->getDebugLoc();
10437 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10439 // Address into RAX/EAX, other two args into ECX, EDX.
10440 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
10441 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
10442 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
10443 for (int i = 0; i < X86::AddrNumOperands; ++i)
10444 MIB.addOperand(MI->getOperand(i));
10446 unsigned ValOps = X86::AddrNumOperands;
10447 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
10448 .addReg(MI->getOperand(ValOps).getReg());
10449 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
10450 .addReg(MI->getOperand(ValOps+1).getReg());
10452 // The instruction doesn't actually take any operands though.
10453 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
10455 MI->eraseFromParent(); // The pseudo is gone now.
10459 MachineBasicBlock *
10460 X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
10461 DebugLoc dl = MI->getDebugLoc();
10462 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10464 // First arg in ECX, the second in EAX.
10465 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
10466 .addReg(MI->getOperand(0).getReg());
10467 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
10468 .addReg(MI->getOperand(1).getReg());
10470 // The instruction doesn't actually take any operands though.
10471 BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));
10473 MI->eraseFromParent(); // The pseudo is gone now.
10477 MachineBasicBlock *
10478 X86TargetLowering::EmitVAARG64WithCustomInserter(
10480 MachineBasicBlock *MBB) const {
10481 // Emit va_arg instruction on X86-64.
10483 // Operands to this pseudo-instruction:
10484 // 0 ) Output : destination address (reg)
10485 // 1-5) Input : va_list address (addr, i64mem)
10486 // 6 ) ArgSize : Size (in bytes) of vararg type
10487 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
10488 // 8 ) Align : Alignment of type
10489 // 9 ) EFLAGS (implicit-def)
10491 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
10492 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
10494 unsigned DestReg = MI->getOperand(0).getReg();
10495 MachineOperand &Base = MI->getOperand(1);
10496 MachineOperand &Scale = MI->getOperand(2);
10497 MachineOperand &Index = MI->getOperand(3);
10498 MachineOperand &Disp = MI->getOperand(4);
10499 MachineOperand &Segment = MI->getOperand(5);
10500 unsigned ArgSize = MI->getOperand(6).getImm();
10501 unsigned ArgMode = MI->getOperand(7).getImm();
10502 unsigned Align = MI->getOperand(8).getImm();
10504 // Memory Reference
10505 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
10506 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
10507 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
10509 // Machine Information
10510 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10511 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
10512 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
10513 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
10514 DebugLoc DL = MI->getDebugLoc();
10516 // struct va_list {
//   i32 gp_offset
//   i32 fp_offset
10519 // i64 overflow_area (address)
10520 // i64 reg_save_area (address)
10522 // sizeof(va_list) = 24
10523 // alignment(va_list) = 8
10525 unsigned TotalNumIntRegs = 6;
10526 unsigned TotalNumXMMRegs = 8;
10527 bool UseGPOffset = (ArgMode == 1);
10528 bool UseFPOffset = (ArgMode == 2);
10529 unsigned MaxOffset = TotalNumIntRegs * 8 +
10530 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
10532 // Align ArgSize to a multiple of 8
10533 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
10534 bool NeedsAlign = (Align > 8);
10536 MachineBasicBlock *thisMBB = MBB;
10537 MachineBasicBlock *overflowMBB;
10538 MachineBasicBlock *offsetMBB;
10539 MachineBasicBlock *endMBB;
10541 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
10542 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
10543 unsigned OffsetReg = 0;
10545 if (!UseGPOffset && !UseFPOffset) {
10546 // If we only pull from the overflow region, we don't create a branch.
10547 // We don't need to alter control flow.
10548 OffsetDestReg = 0; // unused
10549 OverflowDestReg = DestReg;
10552 overflowMBB = thisMBB;
10555 // First emit code to check if gp_offset (or fp_offset) is below the bound.
10556 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
10557 // If not, pull from overflow_area. (branch to overflowMBB)
10562 // offsetMBB overflowMBB
10567 // Registers for the PHI in endMBB
10568 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
10569 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
10571 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
10572 MachineFunction *MF = MBB->getParent();
10573 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10574 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10575 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10577 MachineFunction::iterator MBBIter = MBB;
10580 // Insert the new basic blocks
10581 MF->insert(MBBIter, offsetMBB);
10582 MF->insert(MBBIter, overflowMBB);
10583 MF->insert(MBBIter, endMBB);
10585 // Transfer the remainder of MBB and its successor edges to endMBB.
10586 endMBB->splice(endMBB->begin(), thisMBB,
10587 llvm::next(MachineBasicBlock::iterator(MI)),
10589 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
10591 // Make offsetMBB and overflowMBB successors of thisMBB
10592 thisMBB->addSuccessor(offsetMBB);
10593 thisMBB->addSuccessor(overflowMBB);
10595 // endMBB is a successor of both offsetMBB and overflowMBB
10596 offsetMBB->addSuccessor(endMBB);
10597 overflowMBB->addSuccessor(endMBB);
10599 // Load the offset value into a register
10600 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
10601 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
10605 .addDisp(Disp, UseFPOffset ? 4 : 0)
10606 .addOperand(Segment)
10607 .setMemRefs(MMOBegin, MMOEnd);
10609 // Check if there is enough room left to pull this argument.
10610 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
10612 .addImm(MaxOffset + 8 - ArgSizeA8);
10614 // Branch to "overflowMBB" if offset >= max
10615 // Fall through to "offsetMBB" otherwise
10616 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
10617 .addMBB(overflowMBB);
10620 // In offsetMBB, emit code to use the reg_save_area.
10622 assert(OffsetReg != 0);
10624 // Read the reg_save_area address.
10625 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
10626 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
10631 .addOperand(Segment)
10632 .setMemRefs(MMOBegin, MMOEnd);
10634 // Zero-extend the offset
10635 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
10636 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
10639 .addImm(X86::sub_32bit);
10641 // Add the offset to the reg_save_area to get the final address.
10642 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
10643 .addReg(OffsetReg64)
10644 .addReg(RegSaveReg);
10646 // Compute the offset for the next argument
10647 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
10648 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
10650 .addImm(UseFPOffset ? 16 : 8);
10652 // Store it back into the va_list.
10653 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
10657 .addDisp(Disp, UseFPOffset ? 4 : 0)
10658 .addOperand(Segment)
10659 .addReg(NextOffsetReg)
10660 .setMemRefs(MMOBegin, MMOEnd);
10663 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
10668 // Emit code to use overflow area
10671 // Load the overflow_area address into a register.
10672 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
10673 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
10678 .addOperand(Segment)
10679 .setMemRefs(MMOBegin, MMOEnd);
10681 // If we need to align it, do so. Otherwise, just copy the address
10682 // to OverflowDestReg.
10684 // Align the overflow address
10685 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
10686 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
10688 // aligned_addr = (addr + (align-1)) & ~(align-1)
10689 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
10690 .addReg(OverflowAddrReg)
10693 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
10695 .addImm(~(uint64_t)(Align-1));
10697 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
10698 .addReg(OverflowAddrReg);
10701 // Compute the next overflow address after this argument.
10702 // (the overflow address should be kept 8-byte aligned)
10703 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
10704 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
10705 .addReg(OverflowDestReg)
10706 .addImm(ArgSizeA8);
10708 // Store the new overflow address.
10709 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
10714 .addOperand(Segment)
10715 .addReg(NextAddrReg)
10716 .setMemRefs(MMOBegin, MMOEnd);
10718 // If we branched, emit the PHI to the front of endMBB.
10720 BuildMI(*endMBB, endMBB->begin(), DL,
10721 TII->get(X86::PHI), DestReg)
10722 .addReg(OffsetDestReg).addMBB(offsetMBB)
10723 .addReg(OverflowDestReg).addMBB(overflowMBB);
10726 // Erase the pseudo instruction
10727 MI->eraseFromParent();
10732 MachineBasicBlock *
10733 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
10735 MachineBasicBlock *MBB) const {
10736 // Emit code to save XMM registers to the stack. The ABI says that the
10737 // number of registers to save is given in %al, so it's theoretically
10738 // possible to do an indirect jump trick to avoid saving all of them,
10739 // however this code takes a simpler approach and just executes all
10740 // of the stores if %al is non-zero. It's less code, and it's probably
10741 // easier on the hardware branch predictor, and stores aren't all that
10742 // expensive anyway.
10744 // Create the new basic blocks. One block contains all the XMM stores,
10745 // and one block is the final destination regardless of whether any
10746 // stores were performed.
10747 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
10748 MachineFunction *F = MBB->getParent();
10749 MachineFunction::iterator MBBIter = MBB;
10751 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
10752 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
10753 F->insert(MBBIter, XMMSaveMBB);
10754 F->insert(MBBIter, EndMBB);
10756 // Transfer the remainder of MBB and its successor edges to EndMBB.
10757 EndMBB->splice(EndMBB->begin(), MBB,
10758 llvm::next(MachineBasicBlock::iterator(MI)),
10760 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
10762 // The original block will now fall through to the XMM save block.
10763 MBB->addSuccessor(XMMSaveMBB);
10764 // The XMMSaveMBB will fall through to the end block.
10765 XMMSaveMBB->addSuccessor(EndMBB);
10767 // Now add the instructions.
10768 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10769 DebugLoc DL = MI->getDebugLoc();
10771 unsigned CountReg = MI->getOperand(0).getReg();
10772 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
10773 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
10775 if (!Subtarget->isTargetWin64()) {
10776 // If %al is 0, branch around the XMM save block.
10777 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
10778 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
10779 MBB->addSuccessor(EndMBB);
10782 // In the XMM save block, save all the XMM argument registers.
10783 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
10784 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
10785 MachineMemOperand *MMO =
10786 F->getMachineMemOperand(
10787 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
10788 MachineMemOperand::MOStore,
10789 /*Size=*/16, /*Align=*/16);
10790 BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr))
10791 .addFrameIndex(RegSaveFrameIndex)
10792 .addImm(/*Scale=*/1)
10793 .addReg(/*IndexReg=*/0)
10794 .addImm(/*Disp=*/Offset)
10795 .addReg(/*Segment=*/0)
10796 .addReg(MI->getOperand(i).getReg())
10797 .addMemOperand(MMO);
10800 MI->eraseFromParent(); // The pseudo instruction is gone now.
10805 MachineBasicBlock *
10806 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
10807 MachineBasicBlock *BB) const {
10808 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10809 DebugLoc DL = MI->getDebugLoc();
10811 // To "insert" a SELECT_CC instruction, we actually have to insert the
10812 // diamond control-flow pattern. The incoming instruction knows the
10813 // destination vreg to set, the condition code register to branch on, the
10814 // true/false values to select between, and a branch opcode to use.
10815 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10816 MachineFunction::iterator It = BB;
10822 // cmpTY ccX, r1, r2
10824 // fallthrough --> copy0MBB
10825 MachineBasicBlock *thisMBB = BB;
10826 MachineFunction *F = BB->getParent();
10827 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10828 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10829 F->insert(It, copy0MBB);
10830 F->insert(It, sinkMBB);
10832 // If the EFLAGS register isn't dead in the terminator, then claim that it's
10833 // live into the sink and copy blocks.
10834 const MachineFunction *MF = BB->getParent();
10835 const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
10836 BitVector ReservedRegs = TRI->getReservedRegs(*MF);
10838 for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
10839 const MachineOperand &MO = MI->getOperand(I);
10840 if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue;
10841 unsigned Reg = MO.getReg();
10842 if (Reg != X86::EFLAGS) continue;
10843 copy0MBB->addLiveIn(Reg);
10844 sinkMBB->addLiveIn(Reg);
10847 // Transfer the remainder of BB and its successor edges to sinkMBB.
10848 sinkMBB->splice(sinkMBB->begin(), BB,
10849 llvm::next(MachineBasicBlock::iterator(MI)),
10851 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10853 // Add the true and fallthrough blocks as its successors.
10854 BB->addSuccessor(copy0MBB);
10855 BB->addSuccessor(sinkMBB);
10857 // Create the conditional branch instruction.
10859 unsigned Opc = X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
10860 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
10863 // %FalseValue = ...
10864 // # fallthrough to sinkMBB
10865 copy0MBB->addSuccessor(sinkMBB);
10868 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10870 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10871 TII->get(X86::PHI), MI->getOperand(0).getReg())
10872 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
10873 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
10875 MI->eraseFromParent(); // The pseudo instruction is gone now.
10879 MachineBasicBlock *
10880 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
10881 MachineBasicBlock *BB) const {
10882 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
10883 DebugLoc DL = MI->getDebugLoc();
10885 assert(!Subtarget->isTargetEnvMacho());
10887 // The lowering is pretty easy: we're just emitting the call to _alloca. The
10888 // non-trivial part is impdef of ESP.
10890 if (Subtarget->isTargetWin64()) {
10891 if (Subtarget->isTargetCygMing()) {
10892 // ___chkstk(Mingw64):
10893 // Clobbers R10, R11, RAX and EFLAGS.
10895 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
10896 .addExternalSymbol("___chkstk")
10897 .addReg(X86::RAX, RegState::Implicit)
10898 .addReg(X86::RSP, RegState::Implicit)
10899 .addReg(X86::RAX, RegState::Define | RegState::Implicit)
10900 .addReg(X86::RSP, RegState::Define | RegState::Implicit)
10901 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
10903 // __chkstk(MSVCRT): does not update stack pointer.
10904 // Clobbers R10, R11 and EFLAGS.
10905 // FIXME: RAX(allocated size) might be reused and not killed.
10906 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
10907 .addExternalSymbol("__chkstk")
10908 .addReg(X86::RAX, RegState::Implicit)
10909 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
10910 // RAX has the offset to be subtracted from RSP.
10911 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
10916 const char *StackProbeSymbol =
10917 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
10919 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
10920 .addExternalSymbol(StackProbeSymbol)
10921 .addReg(X86::EAX, RegState::Implicit)
10922 .addReg(X86::ESP, RegState::Implicit)
10923 .addReg(X86::EAX, RegState::Define | RegState::Implicit)
10924 .addReg(X86::ESP, RegState::Define | RegState::Implicit)
10925 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
10928 MI->eraseFromParent(); // The pseudo instruction is gone now.
10932 MachineBasicBlock *
10933 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
10934 MachineBasicBlock *BB) const {
10935 // This is pretty easy. We're taking the value that we received from
10936 // our load from the relocation, sticking it in either RDI (x86-64)
10937 // or EAX and doing an indirect call. The return value will then
10938 // be in the normal return register.
10939 const X86InstrInfo *TII
10940 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
10941 DebugLoc DL = MI->getDebugLoc();
10942 MachineFunction *F = BB->getParent();
10944 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
10945 assert(MI->getOperand(3).isGlobal() && "This should be a global");
10947 if (Subtarget->is64Bit()) {
10948 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
10949 TII->get(X86::MOV64rm), X86::RDI)
10951 .addImm(0).addReg(0)
10952 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
10953 MI->getOperand(3).getTargetFlags())
10955 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
10956 addDirectMem(MIB, X86::RDI);
10957 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
10958 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
10959 TII->get(X86::MOV32rm), X86::EAX)
10961 .addImm(0).addReg(0)
10962 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
10963 MI->getOperand(3).getTargetFlags())
10965 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
10966 addDirectMem(MIB, X86::EAX);
10968 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
10969 TII->get(X86::MOV32rm), X86::EAX)
10970 .addReg(TII->getGlobalBaseReg(F))
10971 .addImm(0).addReg(0)
10972 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
10973 MI->getOperand(3).getTargetFlags())
10975 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
10976 addDirectMem(MIB, X86::EAX);
10979 MI->eraseFromParent(); // The pseudo instruction is gone now.
10983 MachineBasicBlock *
10984 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
10985 MachineBasicBlock *BB) const {
10986 switch (MI->getOpcode()) {
10987 default: assert(false && "Unexpected instr type to insert");
10988 case X86::TAILJMPd64:
10989 case X86::TAILJMPr64:
10990 case X86::TAILJMPm64:
10991 assert(!"TAILJMP64 would not be touched here.");
10992 case X86::TCRETURNdi64:
10993 case X86::TCRETURNri64:
10994 case X86::TCRETURNmi64:
10995 // Defs of TCRETURNxx64 have Win64's callee-saved registers as a subset.
10996 // On AMD64, additional defs should be added before register allocation.
10997 if (!Subtarget->isTargetWin64()) {
10998 MI->addRegisterDefined(X86::RSI);
10999 MI->addRegisterDefined(X86::RDI);
11000 MI->addRegisterDefined(X86::XMM6);
11001 MI->addRegisterDefined(X86::XMM7);
11002 MI->addRegisterDefined(X86::XMM8);
11003 MI->addRegisterDefined(X86::XMM9);
11004 MI->addRegisterDefined(X86::XMM10);
11005 MI->addRegisterDefined(X86::XMM11);
11006 MI->addRegisterDefined(X86::XMM12);
11007 MI->addRegisterDefined(X86::XMM13);
11008 MI->addRegisterDefined(X86::XMM14);
11009 MI->addRegisterDefined(X86::XMM15);
11012 case X86::WIN_ALLOCA:
11013 return EmitLoweredWinAlloca(MI, BB);
11014 case X86::TLSCall_32:
11015 case X86::TLSCall_64:
11016 return EmitLoweredTLSCall(MI, BB);
11017 case X86::CMOV_GR8:
11018 case X86::CMOV_FR32:
11019 case X86::CMOV_FR64:
11020 case X86::CMOV_V4F32:
11021 case X86::CMOV_V2F64:
11022 case X86::CMOV_V2I64:
11023 case X86::CMOV_GR16:
11024 case X86::CMOV_GR32:
11025 case X86::CMOV_RFP32:
11026 case X86::CMOV_RFP64:
11027 case X86::CMOV_RFP80:
11028 return EmitLoweredSelect(MI, BB);
11030 case X86::FP32_TO_INT16_IN_MEM:
11031 case X86::FP32_TO_INT32_IN_MEM:
11032 case X86::FP32_TO_INT64_IN_MEM:
11033 case X86::FP64_TO_INT16_IN_MEM:
11034 case X86::FP64_TO_INT32_IN_MEM:
11035 case X86::FP64_TO_INT64_IN_MEM:
11036 case X86::FP80_TO_INT16_IN_MEM:
11037 case X86::FP80_TO_INT32_IN_MEM:
11038 case X86::FP80_TO_INT64_IN_MEM: {
11039 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
11040 DebugLoc DL = MI->getDebugLoc();
11042 // Change the floating point control register to use "round towards zero"
11043 // mode when truncating to an integer value.
11044 MachineFunction *F = BB->getParent();
11045 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
11046 addFrameReference(BuildMI(*BB, MI, DL,
11047 TII->get(X86::FNSTCW16m)), CWFrameIdx);
11049 // Load the old value of the high byte of the control word...
11051 unsigned OldCW = F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
11052 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
11055 // Set the high part to be round to zero...
11056 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
11059 // Reload the modified control word now...
11060 addFrameReference(BuildMI(*BB, MI, DL,
11061 TII->get(X86::FLDCW16m)), CWFrameIdx);
11063 // Restore the memory image of control word to original value
11064 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
11067 // Get the X86 opcode to use.
11069 switch (MI->getOpcode()) {
11070 default: llvm_unreachable("illegal opcode!");
11071 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
11072 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
11073 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
11074 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
11075 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
11076 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
11077 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
11078 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
11079 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
11083 MachineOperand &Op = MI->getOperand(0);
11085 AM.BaseType = X86AddressMode::RegBase;
11086 AM.Base.Reg = Op.getReg();
11088 AM.BaseType = X86AddressMode::FrameIndexBase;
11089 AM.Base.FrameIndex = Op.getIndex();
11091 Op = MI->getOperand(1);
11093 AM.Scale = Op.getImm();
11094 Op = MI->getOperand(2);
11096 AM.IndexReg = Op.getImm();
11097 Op = MI->getOperand(3);
11098 if (Op.isGlobal()) {
11099 AM.GV = Op.getGlobal();
11101 AM.Disp = Op.getImm();
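// Emit the FP-to-integer store through the address rebuilt in AM; with the
// control word updated above, the conversion now truncates (rounds toward zero).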
11103 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
11104 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
11106 // Reload the original control word now.
11107 addFrameReference(BuildMI(*BB, MI, DL,
11108 TII->get(X86::FLDCW16m)), CWFrameIdx);
11110 MI->eraseFromParent(); // The pseudo instruction is gone now.
11113 // String/text processing lowering.
11114 case X86::PCMPISTRM128REG:
11115 case X86::VPCMPISTRM128REG:
11116 return EmitPCMP(MI, BB, 3, /*memArg=*/false);
11117 case X86::PCMPISTRM128MEM:
11118 case X86::VPCMPISTRM128MEM:
11119 return EmitPCMP(MI, BB, 3, /*memArg=*/true);
11120 case X86::PCMPESTRM128REG:
11121 case X86::VPCMPESTRM128REG:
11122 return EmitPCMP(MI, BB, 5, /*memArg=*/false);
11123 case X86::PCMPESTRM128MEM:
11124 case X86::VPCMPESTRM128MEM:
11125 return EmitPCMP(MI, BB, 5, /*memArg=*/true);
11127 // Thread synchronization.
11129 return EmitMonitor(MI, BB);
11131 return EmitMwait(MI, BB);
11133 // Atomic Lowering.
11134 case X86::ATOMAND32:
11135 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
11136 X86::AND32ri, X86::MOV32rm,
11138 X86::NOT32r, X86::EAX,
11139 X86::GR32RegisterClass);
11140 case X86::ATOMOR32:
11141 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
11142 X86::OR32ri, X86::MOV32rm,
11144 X86::NOT32r, X86::EAX,
11145 X86::GR32RegisterClass);
11146 case X86::ATOMXOR32:
11147 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
11148 X86::XOR32ri, X86::MOV32rm,
11150 X86::NOT32r, X86::EAX,
11151 X86::GR32RegisterClass);
11152 case X86::ATOMNAND32:
11153 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
11154 X86::AND32ri, X86::MOV32rm,
11156 X86::NOT32r, X86::EAX,
11157 X86::GR32RegisterClass, true);
11158 case X86::ATOMMIN32:
11159 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
11160 case X86::ATOMMAX32:
11161 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
11162 case X86::ATOMUMIN32:
11163 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
11164 case X86::ATOMUMAX32:
11165 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);
11167 case X86::ATOMAND16:
11168 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
11169 X86::AND16ri, X86::MOV16rm,
11171 X86::NOT16r, X86::AX,
11172 X86::GR16RegisterClass);
11173 case X86::ATOMOR16:
11174 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
11175 X86::OR16ri, X86::MOV16rm,
11177 X86::NOT16r, X86::AX,
11178 X86::GR16RegisterClass);
11179 case X86::ATOMXOR16:
11180 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
11181 X86::XOR16ri, X86::MOV16rm,
11183 X86::NOT16r, X86::AX,
11184 X86::GR16RegisterClass);
11185 case X86::ATOMNAND16:
11186 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
11187 X86::AND16ri, X86::MOV16rm,
11189 X86::NOT16r, X86::AX,
11190 X86::GR16RegisterClass, true);
11191 case X86::ATOMMIN16:
11192 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
11193 case X86::ATOMMAX16:
11194 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
11195 case X86::ATOMUMIN16:
11196 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
11197 case X86::ATOMUMAX16:
11198 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);
11200 case X86::ATOMAND8:
11201 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
11202 X86::AND8ri, X86::MOV8rm,
11204 X86::NOT8r, X86::AL,
11205 X86::GR8RegisterClass);
11207 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
11208 X86::OR8ri, X86::MOV8rm,
11210 X86::NOT8r, X86::AL,
11211 X86::GR8RegisterClass);
11212 case X86::ATOMXOR8:
11213 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
11214 X86::XOR8ri, X86::MOV8rm,
11216 X86::NOT8r, X86::AL,
11217 X86::GR8RegisterClass);
11218 case X86::ATOMNAND8:
11219 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
11220 X86::AND8ri, X86::MOV8rm,
11222 X86::NOT8r, X86::AL,
11223 X86::GR8RegisterClass, true);
11224 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
11225 // This group is for 64-bit host.
11226 case X86::ATOMAND64:
11227 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
11228 X86::AND64ri32, X86::MOV64rm,
11230 X86::NOT64r, X86::RAX,
11231 X86::GR64RegisterClass);
11232 case X86::ATOMOR64:
11233 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
11234 X86::OR64ri32, X86::MOV64rm,
11236 X86::NOT64r, X86::RAX,
11237 X86::GR64RegisterClass);
11238 case X86::ATOMXOR64:
11239 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
11240 X86::XOR64ri32, X86::MOV64rm,
11242 X86::NOT64r, X86::RAX,
11243 X86::GR64RegisterClass);
11244 case X86::ATOMNAND64:
11245 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
11246 X86::AND64ri32, X86::MOV64rm,
11248 X86::NOT64r, X86::RAX,
11249 X86::GR64RegisterClass, true);
11250 case X86::ATOMMIN64:
11251 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
11252 case X86::ATOMMAX64:
11253 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
11254 case X86::ATOMUMIN64:
11255 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
11256 case X86::ATOMUMAX64:
11257 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
11259 // This group does 64-bit operations on a 32-bit host.
11260 case X86::ATOMAND6432:
11261 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11262 X86::AND32rr, X86::AND32rr,
11263 X86::AND32ri, X86::AND32ri,
11265 case X86::ATOMOR6432:
11266 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11267 X86::OR32rr, X86::OR32rr,
11268 X86::OR32ri, X86::OR32ri,
11270 case X86::ATOMXOR6432:
11271 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11272 X86::XOR32rr, X86::XOR32rr,
11273 X86::XOR32ri, X86::XOR32ri,
11275 case X86::ATOMNAND6432:
11276 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11277 X86::AND32rr, X86::AND32rr,
11278 X86::AND32ri, X86::AND32ri,
11280 case X86::ATOMADD6432:
11281 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11282 X86::ADD32rr, X86::ADC32rr,
11283 X86::ADD32ri, X86::ADC32ri,
11285 case X86::ATOMSUB6432:
11286 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11287 X86::SUB32rr, X86::SBB32rr,
11288 X86::SUB32ri, X86::SBB32ri,
11290 case X86::ATOMSWAP6432:
11291 return EmitAtomicBit6432WithCustomInserter(MI, BB,
11292 X86::MOV32rr, X86::MOV32rr,
11293 X86::MOV32ri, X86::MOV32ri,
11295 case X86::VASTART_SAVE_XMM_REGS:
11296 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
11298 case X86::VAARG_64:
11299 return EmitVAARG64WithCustomInserter(MI, BB);
11303 //===----------------------------------------------------------------------===//
11304 // X86 Optimization Hooks
11305 //===----------------------------------------------------------------------===//
11307 void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
11311 const SelectionDAG &DAG,
11312 unsigned Depth) const {
11313 unsigned Opc = Op.getOpcode();
11314 assert((Opc >= ISD::BUILTIN_OP_END ||
11315 Opc == ISD::INTRINSIC_WO_CHAIN ||
11316 Opc == ISD::INTRINSIC_W_CHAIN ||
11317 Opc == ISD::INTRINSIC_VOID) &&
11318 "Should use MaskedValueIsZero if you don't know whether Op"
11319 " is a target node!");
11321 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
11335 // These nodes' second result is a boolean.
11336 if (Op.getResNo() == 0)
11339 case X86ISD::SETCC:
11340 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
11341 Mask.getBitWidth() - 1);
11346 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
11347 unsigned Depth) const {
11348 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
11349 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
11350 return Op.getValueType().getScalarType().getSizeInBits();
11356 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
11357 /// node is a GlobalAddress + offset.
11358 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
11359 const GlobalValue* &GA,
11360 int64_t &Offset) const {
11361 if (N->getOpcode() == X86ISD::Wrapper) {
11362 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
11363 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
11364 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
11368 return TargetLowering::isGAPlusOffset(N, GA, Offset);
11371 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
11372 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
11373 TargetLowering::DAGCombinerInfo &DCI) {
11374 DebugLoc dl = N->getDebugLoc();
11375 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
11376 SDValue V1 = SVOp->getOperand(0);
11377 SDValue V2 = SVOp->getOperand(1);
11378 EVT VT = SVOp->getValueType(0);
11380 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
11381 V2.getOpcode() == ISD::CONCAT_VECTORS) {
11385 // V UNDEF BUILD_VECTOR UNDEF
11387 // CONCAT_VECTOR CONCAT_VECTOR
11390 // RESULT: V + zero extended
11392 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
11393 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
11394 V1.getOperand(1).getOpcode() != ISD::UNDEF)
11397 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
11400 // To match the shuffle mask, the first half of the mask should
11401 // be exactly the first vector, and all the rest a splat with the
11402 // first element of the second one.
11403 int NumElems = VT.getVectorNumElements();
11404 for (int i = 0; i < NumElems/2; ++i)
11405 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
11406 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
11409 // Emit a zeroed vector and insert the desired subvector on its first half.
11411 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, DAG, dl);
11412 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0),
11413 DAG.getConstant(0, MVT::i32), DAG, dl);
11414 return DCI.CombineTo(N, InsV);
11420 /// PerformShuffleCombine - Performs several different shuffle combines.
11421 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
11422 TargetLowering::DAGCombinerInfo &DCI) {
11423 DebugLoc dl = N->getDebugLoc();
11424 EVT VT = N->getValueType(0);
11426 // Don't create instructions with illegal types after legalize types has run.
11427 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11428 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
11431 // Only handle pure VECTOR_SHUFFLE nodes.
11432 if (VT.getSizeInBits() == 256 && N->getOpcode() == ISD::VECTOR_SHUFFLE)
11433 return PerformShuffleCombine256(N, DAG, DCI);
11435 // Only handle 128-bit wide vectors from here on.
11436 if (VT.getSizeInBits() != 128)
11439 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
11440 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
11441 // consecutive, non-overlapping, and in the right order.
11442 SmallVector<SDValue, 16> Elts;
11443 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
11444 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
11446 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
11449 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
11450 /// generation and convert it from being a bunch of shuffles and extracts
11451 /// to a simple store and scalar loads to extract the elements.
11452 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
11453 const TargetLowering &TLI) {
11454 SDValue InputVector = N->getOperand(0);
11456 // Only operate on vectors of 4 elements, where the alternative shuffling
11457 // gets to be more expensive.
11458 if (InputVector.getValueType() != MVT::v4i32)
11461 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
11462 // single use which is a sign-extend or zero-extend, and all elements are used.
11464 SmallVector<SDNode *, 4> Uses;
11465 unsigned ExtractedElements = 0;
11466 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
11467 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
11468 if (UI.getUse().getResNo() != InputVector.getResNo())
11471 SDNode *Extract = *UI;
11472 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11475 if (Extract->getValueType(0) != MVT::i32)
11477 if (!Extract->hasOneUse())
11479 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
11480 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
11482 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
11485 // Record which element was extracted.
11486 ExtractedElements |=
11487 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
11489 Uses.push_back(Extract);
11492 // If not all the elements were used, this may not be worthwhile.
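// (A mask of 15 == 0b1111 means all four lanes of the v4i32 input were extracted.)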
11493 if (ExtractedElements != 15)
11496 // Ok, we've now decided to do the transformation.
11497 DebugLoc dl = InputVector.getDebugLoc();
11499 // Store the value to a temporary stack slot.
11500 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
11501 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
11502 MachinePointerInfo(), false, false, 0);
11504 // Replace each use (extract) with a load of the appropriate element.
11505 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
11506 UE = Uses.end(); UI != UE; ++UI) {
11507 SDNode *Extract = *UI;
11509 // Compute the element's address.
11510 SDValue Idx = Extract->getOperand(1);
11512 uint64_t EltSize = InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
11513 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
11514 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
11516 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
11517 StackPtr, OffsetVal);
11519 // Load the scalar.
11520 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
11521 ScalarAddr, MachinePointerInfo(),
11524 // Replace the extract with the load.
11525 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
11528 // The replacement was made in place; don't return anything.
11532 /// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
11533 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
11534 const X86Subtarget *Subtarget) {
11535 DebugLoc DL = N->getDebugLoc();
11536 SDValue Cond = N->getOperand(0);
11537 // Get the LHS/RHS of the select.
11538 SDValue LHS = N->getOperand(1);
11539 SDValue RHS = N->getOperand(2);
11541 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
11542 // instructions match the semantics of the common C idiom x<y?x:y but not
11543 // x<=y?x:y, because of how they handle negative zero (which can be
11544 // ignored in unsafe-math mode).
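  // Illustrative example (assumption about the emitted code, not from the
  // original source): with SSE2,
  //   double m = x < y ? x : y;
  // can become a single MINSD, since MINSD returns its second operand when
  // the inputs are unordered or compare equal, which is exactly what the
  // strict '<' select above produces.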
11545 if (Subtarget->hasSSE2() &&
11546 (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
11547 Cond.getOpcode() == ISD::SETCC) {
11548 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
11550 unsigned Opcode = 0;
11551 // Check for x CC y ? x : y.
11552 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
11553 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
11557 // Converting this to a min would handle NaNs incorrectly, and swapping
11558 // the operands would cause it to handle comparisons between positive
11559 // and negative zero incorrectly.
11560 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
11561 if (!UnsafeFPMath &&
11562 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
11564 std::swap(LHS, RHS);
11566 Opcode = X86ISD::FMIN;
11569 // Converting this to a min would handle comparisons between positive
11570 // and negative zero incorrectly.
11571 if (!UnsafeFPMath &&
11572 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
11574 Opcode = X86ISD::FMIN;
11577 // Converting this to a min would handle both negative zeros and NaNs
11578 // incorrectly, but we can swap the operands to fix both.
11579 std::swap(LHS, RHS);
11583 Opcode = X86ISD::FMIN;
11587 // Converting this to a max would handle comparisons between positive
11588 // and negative zero incorrectly.
11589 if (!UnsafeFPMath &&
11590 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
11592 Opcode = X86ISD::FMAX;
11595 // Converting this to a max would handle NaNs incorrectly, and swapping
11596 // the operands would cause it to handle comparisons between positive
11597 // and negative zero incorrectly.
11598 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
11599 if (!UnsafeFPMath &&
11600 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
11602 std::swap(LHS, RHS);
11604 Opcode = X86ISD::FMAX;
11607 // Converting this to a max would handle both negative zeros and NaNs
11608 // incorrectly, but we can swap the operands to fix both.
11609 std::swap(LHS, RHS);
11613 Opcode = X86ISD::FMAX;
11616 // Check for x CC y ? y : x -- a min/max with reversed arms.
11617 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
11618 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
11622 // Converting this to a min would handle comparisons between positive
11623 // and negative zero incorrectly, and swapping the operands would
11624 // cause it to handle NaNs incorrectly.
11625 if (!UnsafeFPMath &&
11626 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
11627 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
11629 std::swap(LHS, RHS);
11631 Opcode = X86ISD::FMIN;
11634 // Converting this to a min would handle NaNs incorrectly.
11635 if (!UnsafeFPMath &&
11636 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
11638 Opcode = X86ISD::FMIN;
11641 // Converting this to a min would handle both negative zeros and NaNs
11642 // incorrectly, but we can swap the operands to fix both.
11643 std::swap(LHS, RHS);
11647 Opcode = X86ISD::FMIN;
11651 // Converting this to a max would handle NaNs incorrectly.
11652 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
11654 Opcode = X86ISD::FMAX;
11657 // Converting this to a max would handle comparisons between positive
11658 // and negative zero incorrectly, and swapping the operands would
11659 // cause it to handle NaNs incorrectly.
11660 if (!UnsafeFPMath &&
11661 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
11662 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
11664 std::swap(LHS, RHS);
11666 Opcode = X86ISD::FMAX;
11669 // Converting this to a max would handle both negative zeros and NaNs
11670 // incorrectly, but we can swap the operands to fix both.
11671 std::swap(LHS, RHS);
11675 Opcode = X86ISD::FMAX;
11681 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
11684 // If this is a select between two integer constants, try to do some
11686 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
11687 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
11688 // Don't do this for crazy integer types.
11689 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
11690 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
11691 // so that TrueC (the true value) is larger than FalseC.
11692 bool NeedsCondInvert = false;
11694 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
11695 // Efficiently invertible.
11696 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
11697 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
11698 isa<ConstantSDNode>(Cond.getOperand(1))))) {
11699 NeedsCondInvert = true;
11700 std::swap(TrueC, FalseC);
11703 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
11704 if (FalseC->getAPIntValue() == 0 &&
11705 TrueC->getAPIntValue().isPowerOf2()) {
11706 if (NeedsCondInvert) // Invert the condition if needed.
11707 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
11708 DAG.getConstant(1, Cond.getValueType()));
11710 // Zero extend the condition if needed.
11711 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
11713 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
11714 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
11715 DAG.getConstant(ShAmt, MVT::i8));
11718 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
11719 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
11720 if (NeedsCondInvert) // Invert the condition if needed.
11721 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
11722 DAG.getConstant(1, Cond.getValueType()));
11724 // Zero extend the condition if needed.
11725 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
11726 FalseC->getValueType(0), Cond);
11727 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
11728 SDValue(FalseC, 0));
11731 // Optimize cases that will turn into an LEA instruction. This requires
11732 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
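      // Illustrative example (not from the original source): "cond ? 14 : 5"
      // has Diff == 9, so it can be emitted as zext(cond), an LEA computing
      // cond*9 (base + index*8), and a final add of the base value 5.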
11733 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
11734 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
11735 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
11737 bool isFastMultiplier = false;
11739 switch ((unsigned char)Diff) {
11741 case 1: // result = add base, cond
11742 case 2: // result = lea base( , cond*2)
11743 case 3: // result = lea base(cond, cond*2)
11744 case 4: // result = lea base( , cond*4)
11745 case 5: // result = lea base(cond, cond*4)
11746 case 8: // result = lea base( , cond*8)
11747 case 9: // result = lea base(cond, cond*8)
11748 isFastMultiplier = true;
11753 if (isFastMultiplier) {
11754 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
11755 if (NeedsCondInvert) // Invert the condition if needed.
11756 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
11757 DAG.getConstant(1, Cond.getValueType()));
11759 // Zero extend the condition if needed.
11760 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
11762 // Scale the condition by the difference.
11764 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
11765 DAG.getConstant(Diff, Cond.getValueType()));
11767 // Add the base if non-zero.
11768 if (FalseC->getAPIntValue() != 0)
11769 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
11770 SDValue(FalseC, 0));
11780 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
11781 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
11782 TargetLowering::DAGCombinerInfo &DCI) {
11783 DebugLoc DL = N->getDebugLoc();
11785 // If the flag operand isn't dead, don't touch this CMOV.
11786 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
11789 SDValue FalseOp = N->getOperand(0);
11790 SDValue TrueOp = N->getOperand(1);
11791 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
11792 SDValue Cond = N->getOperand(3);
11793 if (CC == X86::COND_E || CC == X86::COND_NE) {
11794 switch (Cond.getOpcode()) {
11798 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
11799 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
11800 return (CC == X86::COND_E) ? FalseOp : TrueOp;
11804 // If this is a select between two integer constants, try to do some
11805 // optimizations. Note that the operands are ordered the opposite of SELECT
11807 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
11808 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
11809 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
11810 // larger than FalseC (the false value).
11811 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
11812 CC = X86::GetOppositeBranchCondition(CC);
11813 std::swap(TrueC, FalseC);
11816 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
11817 // This is efficient for any integer data type (including i8/i16) and
11819 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
11820 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
11821 DAG.getConstant(CC, MVT::i8), Cond);
11823 // Zero extend the condition if needed.
11824 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
11826 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
11827 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
11828 DAG.getConstant(ShAmt, MVT::i8));
11829 if (N->getNumValues() == 2) // Dead flag value?
11830 return DCI.CombineTo(N, Cond, SDValue());
11834 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
11835 // for any integer data type, including i8/i16.
11836 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
11837 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
11838 DAG.getConstant(CC, MVT::i8), Cond);
11840 // Zero extend the condition if needed.
11841 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
11842 FalseC->getValueType(0), Cond);
11843 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
11844 SDValue(FalseC, 0));
11846 if (N->getNumValues() == 2) // Dead flag value?
11847 return DCI.CombineTo(N, Cond, SDValue());
11851 // Optimize cases that will turn into an LEA instruction. This requires
11852 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
11853 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
11854 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
11855 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
11857 bool isFastMultiplier = false;
11859 switch ((unsigned char)Diff) {
11861 case 1: // result = add base, cond
11862 case 2: // result = lea base( , cond*2)
11863 case 3: // result = lea base(cond, cond*2)
11864 case 4: // result = lea base( , cond*4)
11865 case 5: // result = lea base(cond, cond*4)
11866 case 8: // result = lea base( , cond*8)
11867 case 9: // result = lea base(cond, cond*8)
11868 isFastMultiplier = true;
11873 if (isFastMultiplier) {
11874 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
11875 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
11876 DAG.getConstant(CC, MVT::i8), Cond);
11877 // Zero extend the condition if needed.
11878 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
11880 // Scale the condition by the difference.
11882 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
11883 DAG.getConstant(Diff, Cond.getValueType()));
11885 // Add the base if non-zero.
11886 if (FalseC->getAPIntValue() != 0)
11887 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
11888 SDValue(FalseC, 0));
11889 if (N->getNumValues() == 2) // Dead flag value?
11890 return DCI.CombineTo(N, Cond, SDValue());
11900 /// PerformMulCombine - Optimize a single multiply with constant into two
11901 /// in order to implement it with two cheaper instructions, e.g.
11902 /// LEA + SHL, LEA + LEA.
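/// Illustrative example (not from the original source): an i64 multiply by
/// 45 is split as (x * 9) * 5, which typically selects to two LEAs:
///   leaq (%rdi,%rdi,8), %rax   # x*9
///   leaq (%rax,%rax,4), %rax   # (x*9)*5 == x*45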
11903 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
11904 TargetLowering::DAGCombinerInfo &DCI) {
11905 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
11908 EVT VT = N->getValueType(0);
11909 if (VT != MVT::i64)
11912 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
11915 uint64_t MulAmt = C->getZExtValue();
11916 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
11919 uint64_t MulAmt1 = 0;
11920 uint64_t MulAmt2 = 0;
11921 if ((MulAmt % 9) == 0) {
11923 MulAmt2 = MulAmt / 9;
11924 } else if ((MulAmt % 5) == 0) {
11926 MulAmt2 = MulAmt / 5;
11927 } else if ((MulAmt % 3) == 0) {
11929 MulAmt2 = MulAmt / 3;
11932 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
11933 DebugLoc DL = N->getDebugLoc();
11935 if (isPowerOf2_64(MulAmt2) &&
11936 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
11937 // If the second multiplier is pow2, issue it first. We want the multiply by
11938 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
11940 std::swap(MulAmt1, MulAmt2);
11943 if (isPowerOf2_64(MulAmt1))
11944 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
11945 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
11947 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
11948 DAG.getConstant(MulAmt1, VT));
11950 if (isPowerOf2_64(MulAmt2))
11951 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
11952 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
11954 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
11955 DAG.getConstant(MulAmt2, VT));
11957 // Do not add new nodes to DAG combiner worklist.
11958 DCI.CombineTo(N, NewMul, false);
11963 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
11964 SDValue N0 = N->getOperand(0);
11965 SDValue N1 = N->getOperand(1);
11966 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
11967 EVT VT = N0.getValueType();
11969 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
11970 // since the result of setcc_c is all zeros or all ones.
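  // Illustrative example (not from the original source): with c1 == 1 and
  // c2 == 3, (shl (and setcc_c, 1), 3) becomes (and setcc_c, 8); this is
  // only valid because setcc_c is known to be 0 or all-ones.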
11971 if (N1C && N0.getOpcode() == ISD::AND &&
11972 N0.getOperand(1).getOpcode() == ISD::Constant) {
11973 SDValue N00 = N0.getOperand(0);
11974 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
11975 ((N00.getOpcode() == ISD::ANY_EXTEND ||
11976 N00.getOpcode() == ISD::ZERO_EXTEND) &&
11977 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
11978 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
11979 APInt ShAmt = N1C->getAPIntValue();
11980 Mask = Mask.shl(ShAmt);
11982 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
11983 N00, DAG.getConstant(Mask, VT));
11990 /// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
11992 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
11993 const X86Subtarget *Subtarget) {
11994 EVT VT = N->getValueType(0);
11995 if (!VT.isVector() && VT.isInteger() &&
11996 N->getOpcode() == ISD::SHL)
11997 return PerformSHLCombine(N, DAG);
11999 // On X86 with SSE2 support, we can transform this to a vector shift if
12000 // all elements are shifted by the same amount. We can't do this in legalize
12001 // because a constant vector is typically transformed into a constant pool
12002 // so we have no knowledge of the shift amount.
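  // Illustrative example (not from the original source):
  //   shl <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  // has a uniform shift amount, so it can be emitted as a single PSLLD by 5
  // through the x86_sse2_pslli_d intrinsic built below.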
12003 if (!Subtarget->hasSSE2())
12006 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
12009 SDValue ShAmtOp = N->getOperand(1);
12010 EVT EltVT = VT.getVectorElementType();
12011 DebugLoc DL = N->getDebugLoc();
12012 SDValue BaseShAmt = SDValue();
12013 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
12014 unsigned NumElts = VT.getVectorNumElements();
12016 for (; i != NumElts; ++i) {
12017 SDValue Arg = ShAmtOp.getOperand(i);
12018 if (Arg.getOpcode() == ISD::UNDEF) continue;
12022 for (; i != NumElts; ++i) {
12023 SDValue Arg = ShAmtOp.getOperand(i);
12024 if (Arg.getOpcode() == ISD::UNDEF) continue;
12025 if (Arg != BaseShAmt) {
12029 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
12030 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) {
12031 SDValue InVec = ShAmtOp.getOperand(0);
12032 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
12033 unsigned NumElts = InVec.getValueType().getVectorNumElements();
12035 for (; i != NumElts; ++i) {
12036 SDValue Arg = InVec.getOperand(i);
12037 if (Arg.getOpcode() == ISD::UNDEF) continue;
12041 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
12042 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
12043 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex();
12044 if (C->getZExtValue() == SplatIdx)
12045 BaseShAmt = InVec.getOperand(1);
12048 if (BaseShAmt.getNode() == 0)
12049 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
12050 DAG.getIntPtrConstant(0));
12054 // The shift amount is an i32.
12055 if (EltVT.bitsGT(MVT::i32))
12056 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
12057 else if (EltVT.bitsLT(MVT::i32))
12058 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt);
12060 // The shift amount is identical so we can do a vector shift.
12061 SDValue ValOp = N->getOperand(0);
12062 switch (N->getOpcode()) {
12064 llvm_unreachable("Unknown shift opcode!");
12067 if (VT == MVT::v2i64)
12068 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12069 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
12071 if (VT == MVT::v4i32)
12072 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12073 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
12075 if (VT == MVT::v8i16)
12076 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12077 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
12081 if (VT == MVT::v4i32)
12082 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12083 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
12085 if (VT == MVT::v8i16)
12086 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12087 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
12091 if (VT == MVT::v2i64)
12092 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12093 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
12095 if (VT == MVT::v4i32)
12096 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12097 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
12099 if (VT == MVT::v8i16)
12100 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12101 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
12109 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
12110 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
12111 // and friends. Likewise for OR -> CMPNEQSS.
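// Illustrative example (assumption, not from the original source): for
//   (and (setcc E, (cmp x, y)), (setcc NP, (cmp x, y)))
// both flag tests read the same UCOMISS-style compare; ZF set with PF clear
// means "ordered and equal", so the pair can be replaced by a single
// CMPEQSS x, y followed by extracting bit 0 of the resulting mask.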
12112 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
12113 TargetLowering::DAGCombinerInfo &DCI,
12114 const X86Subtarget *Subtarget) {
12117 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
12118 // we're requiring SSE2 for both.
12119 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
12120 SDValue N0 = N->getOperand(0);
12121 SDValue N1 = N->getOperand(1);
12122 SDValue CMP0 = N0->getOperand(1);
12123 SDValue CMP1 = N1->getOperand(1);
12124 DebugLoc DL = N->getDebugLoc();
12126 // The SETCCs should both refer to the same CMP.
12127 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
12130 SDValue CMP00 = CMP0->getOperand(0);
12131 SDValue CMP01 = CMP0->getOperand(1);
12132 EVT VT = CMP00.getValueType();
12134 if (VT == MVT::f32 || VT == MVT::f64) {
12135 bool ExpectingFlags = false;
12136 // Check for any users that want flags:
12137 for (SDNode::use_iterator UI = N->use_begin(),
12139 !ExpectingFlags && UI != UE; ++UI)
12140 switch (UI->getOpcode()) {
12145 ExpectingFlags = true;
12147 case ISD::CopyToReg:
12148 case ISD::SIGN_EXTEND:
12149 case ISD::ZERO_EXTEND:
12150 case ISD::ANY_EXTEND:
12154 if (!ExpectingFlags) {
12155 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
12156 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
12158 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
12159 X86::CondCode tmp = cc0;
12164 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
12165 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
12166 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
12167 X86ISD::NodeType NTOperator = is64BitFP ?
12168 X86ISD::FSETCCsd : X86ISD::FSETCCss;
12169 // FIXME: need symbolic constants for these magic numbers.
12170 // See X86ATTInstPrinter.cpp:printSSECC().
12171 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
12172 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01,
12173 DAG.getConstant(x86cc, MVT::i8));
12174 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32,
12176 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI,
12177 DAG.getConstant(1, MVT::i32));
12178 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
12179 return OneBitOfTruth;
12187 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
12188 /// so it can be folded inside ANDNP.
12189 static bool CanFoldXORWithAllOnes(const SDNode *N) {
12190 EVT VT = N->getValueType(0);
12192 // Match direct AllOnes for 128 and 256-bit vectors
12193 if (ISD::isBuildVectorAllOnes(N))
12196 // Look through a bit convert.
12197 if (N->getOpcode() == ISD::BITCAST)
12198 N = N->getOperand(0).getNode();
12200 // Sometimes the operand may come from an insert_subvector building a 256-bit
12202 SDValue V1 = N->getOperand(0);
12203 SDValue V2 = N->getOperand(1);
12205 if (VT.getSizeInBits() == 256 &&
12206 N->getOpcode() == ISD::INSERT_SUBVECTOR &&
12207 V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
12208 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
12209 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
12210 ISD::isBuildVectorAllOnes(V2.getNode()))
12216 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
12217 TargetLowering::DAGCombinerInfo &DCI,
12218 const X86Subtarget *Subtarget) {
12219 if (DCI.isBeforeLegalizeOps())
12222 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
12226 // Want to form ANDNP nodes:
12227 // 1) In the hopes of then easily combining them with OR and AND nodes
12228 // to form PBLEND/PSIGN.
12229 // 2) To match ANDN packed intrinsics
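  // Illustrative example (not from the original source): for v2i64 values,
  //   (and (xor %m, <all ones>), %x)
  // becomes (ANDNP %m, %x), i.e. one PANDN instead of a PXOR with all-ones
  // followed by a PAND.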
12230 EVT VT = N->getValueType(0);
12231 if (VT != MVT::v2i64 && VT != MVT::v4i64)
12234 SDValue N0 = N->getOperand(0);
12235 SDValue N1 = N->getOperand(1);
12236 DebugLoc DL = N->getDebugLoc();
12238 // Check LHS for vnot
12239 if (N0.getOpcode() == ISD::XOR &&
12240 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
12241 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
12242 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
12244 // Check RHS for vnot
12245 if (N1.getOpcode() == ISD::XOR &&
12246 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
12247 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
12248 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
12253 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
12254 TargetLowering::DAGCombinerInfo &DCI,
12255 const X86Subtarget *Subtarget) {
12256 if (DCI.isBeforeLegalizeOps())
12259 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
12263 EVT VT = N->getValueType(0);
12264 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64 && VT != MVT::v2i64)
12267 SDValue N0 = N->getOperand(0);
12268 SDValue N1 = N->getOperand(1);
12270 // look for psign/blend
12271 if (Subtarget->hasSSSE3()) {
12272 if (VT == MVT::v2i64) {
12273 // Canonicalize pandn to RHS
12274 if (N0.getOpcode() == X86ISD::ANDNP)
12276 // or (and (m, x), (pandn m, y))
12277 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
12278 SDValue Mask = N1.getOperand(0);
12279 SDValue X = N1.getOperand(1);
12281 if (N0.getOperand(0) == Mask)
12282 Y = N0.getOperand(1);
12283 if (N0.getOperand(1) == Mask)
12284 Y = N0.getOperand(0);
12286 // Check to see if the mask appeared in both the AND and ANDNP and
12290 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
12291 if (Mask.getOpcode() != ISD::BITCAST ||
12292 X.getOpcode() != ISD::BITCAST ||
12293 Y.getOpcode() != ISD::BITCAST)
12296 // Look through mask bitcast.
12297 Mask = Mask.getOperand(0);
12298 EVT MaskVT = Mask.getValueType();
12300 // Validate that the Mask operand is a vector sra node. The sra node
12301 // will be an intrinsic.
12302 if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
12305 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
12306 // there is no psrai.b
12307 switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) {
12308 case Intrinsic::x86_sse2_psrai_w:
12309 case Intrinsic::x86_sse2_psrai_d:
12311 default: return SDValue();
12314 // Check that the SRA is all signbits.
12315 SDValue SraC = Mask.getOperand(2);
12316 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
12317 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
12318 if ((SraAmt + 1) != EltBits)
12321 DebugLoc DL = N->getDebugLoc();
12323 // Now we know we at least have a pblendvb with the mask val. See if
12324 // we can form a psignb/w/d.
12325 // psign = x.type == y.type == mask.type && y = sub(0, x);
12326 X = X.getOperand(0);
12327 Y = Y.getOperand(0);
12328 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
12329 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
12330 X.getValueType() == MaskVT && X.getValueType() == Y.getValueType()){
12333 case 8: Opc = X86ISD::PSIGNB; break;
12334 case 16: Opc = X86ISD::PSIGNW; break;
12335 case 32: Opc = X86ISD::PSIGND; break;
12339 SDValue Sign = DAG.getNode(Opc, DL, MaskVT, X, Mask.getOperand(1));
12340 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Sign);
12343 // PBLENDVB is only available on SSE 4.1.
12344 if (!Subtarget->hasSSE41())
12347 X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X);
12348 Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y);
12349 Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask);
12350 Mask = DAG.getNode(X86ISD::PBLENDVB, DL, MVT::v16i8, X, Y, Mask);
12351 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Mask);
12356 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
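  // Illustrative example (not from the original source): for i64 operands,
  //   (or (shl x, 8), (srl y, 56))
  // matches this pattern and becomes SHLD x, y, 8, shifting the top 8 bits
  // of y in as the low bits of the result.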
12357 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
12359 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
12361 if (!N0.hasOneUse() || !N1.hasOneUse())
12364 SDValue ShAmt0 = N0.getOperand(1);
12365 if (ShAmt0.getValueType() != MVT::i8)
12367 SDValue ShAmt1 = N1.getOperand(1);
12368 if (ShAmt1.getValueType() != MVT::i8)
12370 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
12371 ShAmt0 = ShAmt0.getOperand(0);
12372 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
12373 ShAmt1 = ShAmt1.getOperand(0);
12375 DebugLoc DL = N->getDebugLoc();
12376 unsigned Opc = X86ISD::SHLD;
12377 SDValue Op0 = N0.getOperand(0);
12378 SDValue Op1 = N1.getOperand(0);
12379 if (ShAmt0.getOpcode() == ISD::SUB) {
12380 Opc = X86ISD::SHRD;
12381 std::swap(Op0, Op1);
12382 std::swap(ShAmt0, ShAmt1);
12385 unsigned Bits = VT.getSizeInBits();
12386 if (ShAmt1.getOpcode() == ISD::SUB) {
12387 SDValue Sum = ShAmt1.getOperand(0);
12388 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
12389 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
12390 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
12391 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
12392 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
12393 return DAG.getNode(Opc, DL, VT,
12395 DAG.getNode(ISD::TRUNCATE, DL,
12398 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
12399 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
12401 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
12402 return DAG.getNode(Opc, DL, VT,
12403 N0.getOperand(0), N1.getOperand(0),
12404 DAG.getNode(ISD::TRUNCATE, DL,
12411 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
12412 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
12413 const X86Subtarget *Subtarget) {
12414 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
12415 // the FP state in cases where an emms may be missing.
12416 // A preferable solution to the general problem is to figure out the right
12417 // places to insert EMMS. This qualifies as a quick hack.
12419 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
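  // Illustrative example (not from the original source): in 32-bit mode with
  // SSE2, an i64 copy through memory
  //   %v = load i64* %p
  //   store i64 %v, i64* %q
  // is rewritten below as one f64 load/store pair rather than two 32-bit
  // integer load/store pairs, as long as the load has no other users.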
12420 StoreSDNode *St = cast<StoreSDNode>(N);
12421 EVT VT = St->getValue().getValueType();
12422 if (VT.getSizeInBits() != 64)
12425 const Function *F = DAG.getMachineFunction().getFunction();
12426 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
12427 bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps
12428 && Subtarget->hasSSE2();
12429 if ((VT.isVector() ||
12430 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
12431 isa<LoadSDNode>(St->getValue()) &&
12432 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
12433 St->getChain().hasOneUse() && !St->isVolatile()) {
12434 SDNode* LdVal = St->getValue().getNode();
12435 LoadSDNode *Ld = 0;
12436 int TokenFactorIndex = -1;
12437 SmallVector<SDValue, 8> Ops;
12438 SDNode* ChainVal = St->getChain().getNode();
12439 // Must be a store of a load. We currently handle two cases: the load
12440 // is a direct child, and it's under an intervening TokenFactor. It is
12441 // possible to dig deeper under nested TokenFactors.
12442 if (ChainVal == LdVal)
12443 Ld = cast<LoadSDNode>(St->getChain());
12444 else if (St->getValue().hasOneUse() &&
12445 ChainVal->getOpcode() == ISD::TokenFactor) {
12446 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
12447 if (ChainVal->getOperand(i).getNode() == LdVal) {
12448 TokenFactorIndex = i;
12449 Ld = cast<LoadSDNode>(St->getValue());
12451 Ops.push_back(ChainVal->getOperand(i));
12455 if (!Ld || !ISD::isNormalLoad(Ld))
12458 // If this is not the MMX case, i.e. we are just turning i64 load/store
12459 // into f64 load/store, avoid the transformation if there are multiple
12460 // uses of the loaded value.
12461 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
12464 DebugLoc LdDL = Ld->getDebugLoc();
12465 DebugLoc StDL = N->getDebugLoc();
12466 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
12467 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
12469 if (Subtarget->is64Bit() || F64IsLegal) {
12470 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
12471 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
12472 Ld->getPointerInfo(), Ld->isVolatile(),
12473 Ld->isNonTemporal(), Ld->getAlignment());
12474 SDValue NewChain = NewLd.getValue(1);
12475 if (TokenFactorIndex != -1) {
12476 Ops.push_back(NewChain);
12477 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
12480 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
12481 St->getPointerInfo(),
12482 St->isVolatile(), St->isNonTemporal(),
12483 St->getAlignment());
12486 // Otherwise, lower to two pairs of 32-bit loads / stores.
12487 SDValue LoAddr = Ld->getBasePtr();
12488 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
12489 DAG.getConstant(4, MVT::i32));
12491 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
12492 Ld->getPointerInfo(),
12493 Ld->isVolatile(), Ld->isNonTemporal(),
12494 Ld->getAlignment());
12495 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
12496 Ld->getPointerInfo().getWithOffset(4),
12497 Ld->isVolatile(), Ld->isNonTemporal(),
12498 MinAlign(Ld->getAlignment(), 4));
12500 SDValue NewChain = LoLd.getValue(1);
12501 if (TokenFactorIndex != -1) {
12502 Ops.push_back(LoLd);
12503 Ops.push_back(HiLd);
12504 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
12508 LoAddr = St->getBasePtr();
12509 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
12510 DAG.getConstant(4, MVT::i32));
12512 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
12513 St->getPointerInfo(),
12514 St->isVolatile(), St->isNonTemporal(),
12515 St->getAlignment());
12516 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
12517 St->getPointerInfo().getWithOffset(4),
12519 St->isNonTemporal(),
12520 MinAlign(St->getAlignment(), 4));
12521 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
12526 /// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
12527 /// X86ISD::FXOR nodes.
12528 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
12529 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
12530 // F[X]OR(0.0, x) -> x
12531 // F[X]OR(x, 0.0) -> x
12532 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
12533 if (C->getValueAPF().isPosZero())
12534 return N->getOperand(1);
12535 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
12536 if (C->getValueAPF().isPosZero())
12537 return N->getOperand(0);
12541 /// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
12542 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
12543 // FAND(0.0, x) -> 0.0
12544 // FAND(x, 0.0) -> 0.0
12545 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
12546 if (C->getValueAPF().isPosZero())
12547 return N->getOperand(0);
12548 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
12549 if (C->getValueAPF().isPosZero())
12550 return N->getOperand(1);
12554 static SDValue PerformBTCombine(SDNode *N,
12556 TargetLowering::DAGCombinerInfo &DCI) {
12557 // BT ignores high bits in the bit index operand.
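  // Illustrative example (not from the original source): since only the low
  // log2(width) bits of the index matter, an (and %idx, 31) feeding a 32-bit
  // BT is redundant, and the demanded-bits simplification below can usually
  // strip it.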
12558 SDValue Op1 = N->getOperand(1);
12559 if (Op1.hasOneUse()) {
12560 unsigned BitWidth = Op1.getValueSizeInBits();
12561 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
12562 APInt KnownZero, KnownOne;
12563 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
12564 !DCI.isBeforeLegalizeOps());
12565 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12566 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
12567 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
12568 DCI.CommitTargetLoweringOpt(TLO);
12573 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
12574 SDValue Op = N->getOperand(0);
12575 if (Op.getOpcode() == ISD::BITCAST)
12576 Op = Op.getOperand(0);
12577 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
12578 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
12579 VT.getVectorElementType().getSizeInBits() ==
12580 OpVT.getVectorElementType().getSizeInBits()) {
12581 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
12586 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
12587 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
12588 // (and (i32 x86isd::setcc_carry), 1)
12589 // This eliminates the zext. This transformation is necessary because
12590 // ISD::SETCC is always legalized to i8.
12591 DebugLoc dl = N->getDebugLoc();
12592 SDValue N0 = N->getOperand(0);
12593 EVT VT = N->getValueType(0);
12594 if (N0.getOpcode() == ISD::AND &&
12596 N0.getOperand(0).hasOneUse()) {
12597 SDValue N00 = N0.getOperand(0);
12598 if (N00.getOpcode() != X86ISD::SETCC_CARRY)
12600 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
12601 if (!C || C->getZExtValue() != 1)
12603 return DAG.getNode(ISD::AND, dl, VT,
12604 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
12605 N00.getOperand(0), N00.getOperand(1)),
12606 DAG.getConstant(1, VT));
12612 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
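// Illustrative example (assumption, not from the original source): instead
// of "setb %al" followed by a zero extension, COND_B is materialized as
// "sbb %eax, %eax" (0 or all-ones) and masked with 1 below, which is cheaper
// to widen and sometimes more useful as a mask.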
12613 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
12614 unsigned X86CC = N->getConstantOperandVal(0);
12615 SDValue EFLAG = N->getOperand(1);
12616 DebugLoc DL = N->getDebugLoc();
12618 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
12619 // a zext and produces an all-ones bit which is more useful than 0/1 in some
12621 if (X86CC == X86::COND_B)
12622 return DAG.getNode(ISD::AND, DL, MVT::i8,
12623 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
12624 DAG.getConstant(X86CC, MVT::i8), EFLAG),
12625 DAG.getConstant(1, MVT::i8));
12630 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
12631 const X86TargetLowering *XTLI) {
12632 SDValue Op0 = N->getOperand(0);
12633 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
12634 // a 32-bit target where SSE doesn't support i64->FP operations.
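  // Illustrative example (assumption, not from the original source): on a
  // 32-bit target,
  //   %d = sitofp i64 %v to double   ; where %v comes straight from a load
  // is built as a single x87 FILD of the original memory operand via
  // BuildFILD, rather than legalizing the illegal i64 operand first.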
12635 if (Op0.getOpcode() == ISD::LOAD) {
12636 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
12637 EVT VT = Ld->getValueType(0);
12638 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
12639 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
12640 !XTLI->getSubtarget()->is64Bit() &&
12641 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
12642 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
12643 Ld->getChain(), Op0, DAG);
12644 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
12651 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
12652 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
12653 X86TargetLowering::DAGCombinerInfo &DCI) {
12654 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
12655 // the result is either zero or one (depending on the input carry bit).
12656 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
12657 if (X86::isZeroNode(N->getOperand(0)) &&
12658 X86::isZeroNode(N->getOperand(1)) &&
12659 // We don't have a good way to replace an EFLAGS use, so only do this when
12661 SDValue(N, 1).use_empty()) {
12662 DebugLoc DL = N->getDebugLoc();
12663 EVT VT = N->getValueType(0);
12664 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
12665 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
12666 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
12667 DAG.getConstant(X86::COND_B,MVT::i8),
12669 DAG.getConstant(1, VT));
12670 return DCI.CombineTo(N, Res1, CarryOut);
12676 // fold (add Y, (sete X, 0)) -> adc 0, Y
12677 // (add Y, (setne X, 0)) -> sbb -1, Y
12678 // (sub (sete X, 0), Y) -> sbb 0, Y
12679 // (sub (setne X, 0), Y) -> adc -1, Y
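// Illustrative example (not from the original source):
//   add %y, (zext (sete %x, 0))
// becomes "cmp $1, %x" followed by "adc $0, %y": the compare borrows (sets
// CF) exactly when %x is zero, so %y is incremented only in that case.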
12680 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
12681 DebugLoc DL = N->getDebugLoc();
12683 // Look through ZExts.
12684 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
12685 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
12688 SDValue SetCC = Ext.getOperand(0);
12689 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
12692 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
12693 if (CC != X86::COND_E && CC != X86::COND_NE)
12696 SDValue Cmp = SetCC.getOperand(1);
12697 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
12698 !X86::isZeroNode(Cmp.getOperand(1)) ||
12699 !Cmp.getOperand(0).getValueType().isInteger())
12702 SDValue CmpOp0 = Cmp.getOperand(0);
12703 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
12704 DAG.getConstant(1, CmpOp0.getValueType()));
12706 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
12707 if (CC == X86::COND_NE)
12708 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
12709 DL, OtherVal.getValueType(), OtherVal,
12710 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
12711 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
12712 DL, OtherVal.getValueType(), OtherVal,
12713 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
12716 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) {
12717 SDValue Op0 = N->getOperand(0);
12718 SDValue Op1 = N->getOperand(1);
12720 // X86 can't encode an immediate LHS of a sub. See if we can push the
12721 // negation into a preceding instruction.
12722 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
12723 uint64_t Op0C = C->getSExtValue();
12725 // If the RHS of the sub is a XOR with one use and a constant, invert the
12726 // immediate. Then add one to the LHS of the sub so we can turn
12727 // X-Y -> X+~Y+1, saving one register.
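    // Illustrative example (not from the original source): 5 - (x ^ 3) becomes
    // (x ^ ~3) + 6, since x ^ ~C == ~(x ^ C) and A - B == ~B + A + 1; both the
    // XOR and the ADD can then take their constants as immediates.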
12728 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
12729 isa<ConstantSDNode>(Op1.getOperand(1))) {
12730 uint64_t XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getSExtValue();
12731 EVT VT = Op0.getValueType();
12732 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT,
12734 DAG.getConstant(~XorC, VT));
12735 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor,
12736 DAG.getConstant(Op0C+1, VT));
12740 return OptimizeConditionalInDecrement(N, DAG);
12743 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
12744 DAGCombinerInfo &DCI) const {
12745 SelectionDAG &DAG = DCI.DAG;
12746 switch (N->getOpcode()) {
12748 case ISD::EXTRACT_VECTOR_ELT:
12749 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
12750 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
12751 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
12752 case ISD::ADD: return OptimizeConditionalInDecrement(N, DAG);
12753 case ISD::SUB: return PerformSubCombine(N, DAG);
12754 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
12755 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
12758 case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
12759 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
12760 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
12761 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
12762 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
12764 case X86ISD::FOR: return PerformFORCombine(N, DAG);
12765 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
12766 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
12767 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
12768 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
12769 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
12770 case X86ISD::SHUFPS: // Handle all target specific shuffles
12771 case X86ISD::SHUFPD:
12772 case X86ISD::PALIGN:
12773 case X86ISD::PUNPCKHBW:
12774 case X86ISD::PUNPCKHWD:
12775 case X86ISD::PUNPCKHDQ:
12776 case X86ISD::PUNPCKHQDQ:
12777 case X86ISD::UNPCKHPS:
12778 case X86ISD::UNPCKHPD:
12779 case X86ISD::VUNPCKHPSY:
12780 case X86ISD::VUNPCKHPDY:
12781 case X86ISD::PUNPCKLBW:
12782 case X86ISD::PUNPCKLWD:
12783 case X86ISD::PUNPCKLDQ:
12784 case X86ISD::PUNPCKLQDQ:
12785 case X86ISD::UNPCKLPS:
12786 case X86ISD::UNPCKLPD:
12787 case X86ISD::VUNPCKLPSY:
12788 case X86ISD::VUNPCKLPDY:
12789 case X86ISD::MOVHLPS:
12790 case X86ISD::MOVLHPS:
12791 case X86ISD::PSHUFD:
12792 case X86ISD::PSHUFHW:
12793 case X86ISD::PSHUFLW:
12794 case X86ISD::MOVSS:
12795 case X86ISD::MOVSD:
12796 case X86ISD::VPERMILPS:
12797 case X86ISD::VPERMILPSY:
12798 case X86ISD::VPERMILPD:
12799 case X86ISD::VPERMILPDY:
12800 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI);
12806 /// isTypeDesirableForOp - Return true if the target has native support for
12807 /// the specified value type and it is 'desirable' to use the type for the
12808 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
12809 /// instruction encodings are longer and some i16 instructions are slow.
12810 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
12811 if (!isTypeLegal(VT))
12813 if (VT != MVT::i16)
12820 case ISD::SIGN_EXTEND:
12821 case ISD::ZERO_EXTEND:
12822 case ISD::ANY_EXTEND:
12835 /// IsDesirableToPromoteOp - This method queries the target whether it is
12836 /// beneficial for the dag combiner to promote the specified node. If true, it
12837 /// should return the desired promotion type by reference.
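/// Illustrative example (assumption, not from the original source): an i16
/// add is usually better promoted to an i32 add whose low 16 bits are used,
/// avoiding the operand-size prefix that 16-bit arithmetic needs in
/// 32/64-bit mode.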
12838 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
12839 EVT VT = Op.getValueType();
12840 if (VT != MVT::i16)
12843 bool Promote = false;
12844 bool Commute = false;
12845 switch (Op.getOpcode()) {
12848 LoadSDNode *LD = cast<LoadSDNode>(Op);
12849 // If the non-extending load has a single use and it's not live out, then it
12850 // might be folded.
12851 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
12852 Op.hasOneUse()*/) {
12853 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
12854 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
12855 // The only case where we'd want to promote LOAD (rather than it being
12856 // promoted as an operand) is when its only use is liveout.
12857 if (UI->getOpcode() != ISD::CopyToReg)
12864 case ISD::SIGN_EXTEND:
12865 case ISD::ZERO_EXTEND:
12866 case ISD::ANY_EXTEND:
12871 SDValue N0 = Op.getOperand(0);
12872 // Look out for (store (shl (load), x)).
12873 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
12886 SDValue N0 = Op.getOperand(0);
12887 SDValue N1 = Op.getOperand(1);
12888 if (!Commute && MayFoldLoad(N1))
12890 // Avoid disabling potential load folding opportunities.
12891 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
12893 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
12903 //===----------------------------------------------------------------------===//
12904 // X86 Inline Assembly Support
12905 //===----------------------------------------------------------------------===//
12907 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
12908 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
12910 std::string AsmStr = IA->getAsmString();
12912 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
12913 SmallVector<StringRef, 4> AsmPieces;
12914 SplitString(AsmStr, AsmPieces, ";\n");
12916 switch (AsmPieces.size()) {
12917 default: return false;
12919 AsmStr = AsmPieces[0];
12921 SplitString(AsmStr, AsmPieces, " \t"); // Split with whitespace.
12923 // FIXME: this should verify that we are targeting a 486 or better. If not,
12924 // we will turn this bswap into something that will be lowered to logical ops
12925 // instead of emitting the bswap asm. For now, we don't support 486 or lower
12926 // so don't worry about this.
12928 if (AsmPieces.size() == 2 &&
12929 (AsmPieces[0] == "bswap" ||
12930 AsmPieces[0] == "bswapq" ||
12931 AsmPieces[0] == "bswapl") &&
12932 (AsmPieces[1] == "$0" ||
12933 AsmPieces[1] == "${0:q}")) {
12934 // No need to check constraints, nothing other than the equivalent of
12935 // "=r,0" would be valid here.
12936 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12937 if (!Ty || Ty->getBitWidth() % 16 != 0)
12939 return IntrinsicLowering::LowerToByteSwap(CI);
12941 // rorw $$8, ${0:w} --> llvm.bswap.i16
12942 if (CI->getType()->isIntegerTy(16) &&
12943 AsmPieces.size() == 3 &&
12944 (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") &&
12945 AsmPieces[1] == "$$8," &&
12946 AsmPieces[2] == "${0:w}" &&
12947 IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
12949 const std::string &ConstraintsStr = IA->getConstraintString();
12950 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
12951 std::sort(AsmPieces.begin(), AsmPieces.end());
12952 if (AsmPieces.size() == 4 &&
12953 AsmPieces[0] == "~{cc}" &&
12954 AsmPieces[1] == "~{dirflag}" &&
12955 AsmPieces[2] == "~{flags}" &&
12956 AsmPieces[3] == "~{fpsr}") {
12957 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12958 if (!Ty || Ty->getBitWidth() % 16 != 0)
12960 return IntrinsicLowering::LowerToByteSwap(CI);
12965 if (CI->getType()->isIntegerTy(32) &&
12966 IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
12967 SmallVector<StringRef, 4> Words;
12968 SplitString(AsmPieces[0], Words, " \t,");
12969 if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
12970 Words[2] == "${0:w}") {
12972 SplitString(AsmPieces[1], Words, " \t,");
12973 if (Words.size() == 3 && Words[0] == "rorl" && Words[1] == "$$16" &&
12974 Words[2] == "$0") {
12976 SplitString(AsmPieces[2], Words, " \t,");
12977 if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
12978 Words[2] == "${0:w}") {
12980 const std::string &ConstraintsStr = IA->getConstraintString();
12981 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
12982 std::sort(AsmPieces.begin(), AsmPieces.end());
12983 if (AsmPieces.size() == 4 &&
12984 AsmPieces[0] == "~{cc}" &&
12985 AsmPieces[1] == "~{dirflag}" &&
12986 AsmPieces[2] == "~{flags}" &&
12987 AsmPieces[3] == "~{fpsr}") {
12988 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12989 if (!Ty || Ty->getBitWidth() % 16 != 0)
12991 return IntrinsicLowering::LowerToByteSwap(CI);
12998 if (CI->getType()->isIntegerTy(64)) {
12999 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
13000 if (Constraints.size() >= 2 &&
13001 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
13002 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
13003 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
13004 SmallVector<StringRef, 4> Words;
13005 SplitString(AsmPieces[0], Words, " \t");
13006 if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
13008 SplitString(AsmPieces[1], Words, " \t");
13009 if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
13011 SplitString(AsmPieces[2], Words, " \t,");
13012 if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
13013 Words[2] == "%edx") {
13014 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
13015 if (!Ty || Ty->getBitWidth() % 16 != 0)
13017 return IntrinsicLowering::LowerToByteSwap(CI);
13030 /// getConstraintType - Given a constraint letter, return the type of
13031 /// constraint it is for this target.
13032 X86TargetLowering::ConstraintType
13033 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
13034 if (Constraint.size() == 1) {
13035 switch (Constraint[0]) {
13046 return C_RegisterClass;
13070 return TargetLowering::getConstraintType(Constraint);
13073 /// Examine constraint type and operand type and determine a weight value.
13074 /// This object must already have been set up with the operand type
13075 /// and the current alternative constraint selected.
13076 TargetLowering::ConstraintWeight
13077 X86TargetLowering::getSingleConstraintMatchWeight(
13078 AsmOperandInfo &info, const char *constraint) const {
13079 ConstraintWeight weight = CW_Invalid;
13080 Value *CallOperandVal = info.CallOperandVal;
13081 // If we don't have a value, we can't do a match,
13082 // but allow it at the lowest weight.
13083 if (CallOperandVal == NULL)
13085 Type *type = CallOperandVal->getType();
13086 // Look at the constraint type.
13087 switch (*constraint) {
13089 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
13100 if (CallOperandVal->getType()->isIntegerTy())
13101 weight = CW_SpecificReg;
13106 if (type->isFloatingPointTy())
13107 weight = CW_SpecificReg;
13110 if (type->isX86_MMXTy() && Subtarget->hasMMX())
13111 weight = CW_SpecificReg;
13115 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasXMM())
13116 weight = CW_Register;
13119 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
13120 if (C->getZExtValue() <= 31)
13121 weight = CW_Constant;
13125 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13126 if (C->getZExtValue() <= 63)
13127 weight = CW_Constant;
13131 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13132 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
13133 weight = CW_Constant;
13137 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13138 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
13139 weight = CW_Constant;
13143 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13144 if (C->getZExtValue() <= 3)
13145 weight = CW_Constant;
13149 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13150 if (C->getZExtValue() <= 0xff)
13151 weight = CW_Constant;
13156 if (dyn_cast<ConstantFP>(CallOperandVal)) {
13157 weight = CW_Constant;
13161 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13162 if ((C->getSExtValue() >= -0x80000000LL) &&
13163 (C->getSExtValue() <= 0x7fffffffLL))
13164 weight = CW_Constant;
13168 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
13169 if (C->getZExtValue() <= 0xffffffff)
13170 weight = CW_Constant;
13177 /// LowerXConstraint - try to replace an X constraint, which matches anything,
13178 /// with another that has more specific requirements based on the type of the
13179 /// corresponding operand.
13180 const char *X86TargetLowering::
13181 LowerXConstraint(EVT ConstraintVT) const {
13182 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
13183 // 'f' like normal targets.
13184 if (ConstraintVT.isFloatingPoint()) {
13185 if (Subtarget->hasXMMInt())
13187 if (Subtarget->hasXMM())
13191 return TargetLowering::LowerXConstraint(ConstraintVT);
13194 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
13195 /// vector. If it is invalid, don't add anything to Ops.
13196 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
13197 std::string &Constraint,
13198 std::vector<SDValue>&Ops,
13199 SelectionDAG &DAG) const {
13200 SDValue Result(0, 0);
13202 // Only support length 1 constraints for now.
13203 if (Constraint.length() > 1) return;
13205 char ConstraintLetter = Constraint[0];
13206 switch (ConstraintLetter) {
13209 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13210 if (C->getZExtValue() <= 31) {
13211 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
13217 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13218 if (C->getZExtValue() <= 63) {
13219 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
13225 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13226 if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
13227 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
13233 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13234 if (C->getZExtValue() <= 255) {
13235 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
13241 // 32-bit signed value
13242 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13243 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
13244 C->getSExtValue())) {
13245 // Widen to 64 bits here to get it sign extended.
13246 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
13249 // FIXME gcc accepts some relocatable values here too, but only in certain
13250 // memory models; it's complicated.
13255 // 32-bit unsigned value
13256 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
13257 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
13258 C->getZExtValue())) {
13259 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
13263 // FIXME gcc accepts some relocatable values here too, but only in certain
13264 // memory models; it's complicated.
13268 // Literal immediates are always ok.
13269 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
13270 // Widen to 64 bits here to get it sign extended.
13271 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
13275 // In any sort of PIC mode addresses need to be computed at runtime by
13276 // adding in a register or some sort of table lookup. These can't
13277 // be used as immediates.
13278 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
13281 // If we are in non-pic codegen mode, we allow the address of a global (with
13282 // an optional displacement) to be used with 'i'.
13283 GlobalAddressSDNode *GA = 0;
13284 int64_t Offset = 0;
13286 // Match either (GA), (GA+C), (GA+C1+C2), etc.
13288 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
13289 Offset += GA->getOffset();
13291 } else if (Op.getOpcode() == ISD::ADD) {
13292 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
13293 Offset += C->getZExtValue();
13294 Op = Op.getOperand(0);
13297 } else if (Op.getOpcode() == ISD::SUB) {
13298 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
13299 Offset += -C->getZExtValue();
13300 Op = Op.getOperand(0);
13305 // Otherwise, this isn't something we can handle, reject it.
13309 const GlobalValue *GV = GA->getGlobal();
13310 // If we require an extra load to get this address, as in PIC mode, we
13311 // can't accept it.
13312 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
13313 getTargetMachine())))
13316 Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
13317 GA->getValueType(0), Offset);
13322 if (Result.getNode()) {
13323 Ops.push_back(Result);
13326 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, X86::GR32RegisterClass);
        else if (VT == MVT::i16)
          return std::make_pair(0U, X86::GR16RegisterClass);
        else if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, X86::GR8RegisterClass);
        else if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, X86::GR64RegisterClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, X86::GR32_ABCDRegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_ABCDRegisterClass);
      else if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, X86::GR64_ABCDRegisterClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
      return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasXMMInt()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasXMM()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }
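
  // Illustrative note (not part of the original source): the single-letter
  // constraints above select a register class rather than an immediate.
  // Hypothetical user code exercising them:
  //
  //   __asm__("bsrl %1, %0" : "=r"(hi) : "r"(mask));   // 'r': GR32 for i32
  //   __asm__("addps %1, %0" : "+x"(acc) : "x"(val));  // 'x': an XMM register
  //
  // The switch returns the register class matching both the letter and the
  // operand's value type; anything unhandled falls through to the default
  // lookup below.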

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = X86::CCRRegisterClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GR32_ADRegisterClass;
      return Res;
    }
    return Res;
  }

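  // Illustrative note (not part of the original source): explicit register
  // names reach this point spelled out in the constraint string. For example,
  // hypothetical user code such as
  //
  //   __asm__ __volatile__("rdtsc" : "=A"(tsc));   // 'A' = EDX:EAX on x86-32
  //   __asm__("fsqrt" : "=t"(r) : "0"(x));         // "=t" = top of FP stack
  //
  // ends up here once a front end (clang, typically) has turned "=t" into a
  // "{st}" constraint string; the blocks above map "st(N)", "{st}", "{flags}"
  // and "A" onto the corresponding physical registers.
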
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

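  // Illustrative note (not part of the original source): the remapping below
  // is what turns a hypothetical IR-level constraint such as
  //
  //   %lo = call i32 asm "", "={ax}"()
  //
  // into EAX/GR32: the generic lookup returns AX in GR16 for "{ax}" regardless
  // of type, while the operand here is i32.
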
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
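    // Illustrative note (not part of the original source): e.g. a hypothetical
    //   %r = call double asm "sqrtsd $1, $0", "={xmm0},x"(double %x)
    // initially maps {xmm0} to whichever XMM class the generic table lists
    // first; the reassignment above switches it to the class matching the
    // f64 operand.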