X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86FastISel.cpp;h=c5da0b9b164f6140f3d8c5b099bbef0686b4acb1;hb=b8f0d89d0584e37e205c04ed5753f57a23365403;hp=aafa96389acb6ec1cd40100972e3a13abbd91856;hpb=a6176adc8aad5d37268f40e49ae58c0eae2323c2;p=oota-llvm.git

diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index aafa96389ac..c5da0b9b164 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -14,24 +14,24 @@
 //===----------------------------------------------------------------------===//
 
 #include "X86.h"
-#include "X86InstrBuilder.h"
 #include "X86ISelLowering.h"
+#include "X86InstrBuilder.h"
 #include "X86RegisterInfo.h"
 #include "X86Subtarget.h"
 #include "X86TargetMachine.h"
-#include "llvm/CallingConv.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/GlobalAlias.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Operator.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
@@ -45,9 +45,9 @@ class X86FastISel : public FastISel {
   /// make the right decision when generating code for different targets.
   const X86Subtarget *Subtarget;
 
-  /// StackPtr - Register used as the stack pointer.
+  /// RegInfo - X86 register info.
   ///
-  unsigned StackPtr;
+  const X86RegisterInfo *RegInfo;
 
   /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
   /// floating point ops.
@@ -57,11 +57,13 @@ class X86FastISel : public FastISel {
   bool X86ScalarSSEf32;
 
 public:
-  explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
+  explicit X86FastISel(FunctionLoweringInfo &funcInfo,
+                       const TargetLibraryInfo *libInfo)
+    : FastISel(funcInfo, libInfo) {
     Subtarget = &TM.getSubtarget<X86Subtarget>();
-    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
-    X86ScalarSSEf64 = Subtarget->hasSSE2() || Subtarget->hasAVX();
-    X86ScalarSSEf32 = Subtarget->hasSSE1() || Subtarget->hasAVX();
+    X86ScalarSSEf64 = Subtarget->hasSSE2();
+    X86ScalarSSEf32 = Subtarget->hasSSE1();
+    RegInfo = static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
   }
 
   virtual bool TargetSelectInstruction(const Instruction *I);
@@ -73,6 +75,8 @@ public:
   virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                              const LoadInst *LI);
 
+  virtual bool FastLowerArguments();
+
 #include "X86GenFastISel.inc"
 
 private:
@@ -155,9 +159,9 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
   // For now, require SSE/SSE2 for performing floating-point operations,
   // since x87 requires additional work.
   if (VT == MVT::f64 && !X86ScalarSSEf64)
-     return false;
+    return false;
   if (VT == MVT::f32 && !X86ScalarSSEf32)
-     return false;
+    return false;
   // Similarly, no f80 support yet.
   if (VT == MVT::f80)
     return false;
@@ -183,37 +187,37 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
   case MVT::i1:
   case MVT::i8:
     Opc = X86::MOV8rm;
-    RC = X86::GR8RegisterClass;
+    RC = &X86::GR8RegClass;
     break;
   case MVT::i16:
     Opc = X86::MOV16rm;
-    RC = X86::GR16RegisterClass;
+    RC = &X86::GR16RegClass;
     break;
   case MVT::i32:
     Opc = X86::MOV32rm;
-    RC = X86::GR32RegisterClass;
+    RC = &X86::GR32RegClass;
    break;
   case MVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
-    RC = X86::GR64RegisterClass;
+    RC = &X86::GR64RegClass;
     break;
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
-      RC = X86::FR32RegisterClass;
+      RC = &X86::FR32RegClass;
     } else {
       Opc = X86::LD_Fp32m;
-      RC = X86::RFP32RegisterClass;
+      RC = &X86::RFP32RegClass;
     }
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
-      RC = X86::FR64RegisterClass;
+      RC = &X86::FR64RegClass;
     } else {
       Opc = X86::LD_Fp64m;
-      RC = X86::RFP64RegisterClass;
+      RC = &X86::RFP64RegClass;
     }
     break;
   case MVT::f80:
@@ -240,7 +244,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
   default: return false;
   case MVT::i1: {
     // Mask out all but lowest bit.
-    unsigned AndResult = createResultReg(X86::GR8RegisterClass);
+    unsigned AndResult = createResultReg(&X86::GR8RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
             TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
     Val = AndResult;
@@ -258,6 +262,18 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
     Opc = X86ScalarSSEf64 ?
           (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
     break;
+  case MVT::v4f32:
+    Opc = X86::MOVAPSmr;
+    break;
+  case MVT::v2f64:
+    Opc = X86::MOVAPDmr;
+    break;
+  case MVT::v4i32:
+  case MVT::v2i64:
+  case MVT::v8i16:
+  case MVT::v16i8:
+    Opc = X86::MOVDQAmr;
+    break;
   }
 
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
@@ -283,7 +299,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
     case MVT::i32: Opc = X86::MOV32mi; break;
     case MVT::i64:
       // Must be a 32-bit sign extended value.
-      if ((int)CI->getSExtValue() == CI->getSExtValue())
+      if (isInt<32>(CI->getSExtValue()))
         Opc = X86::MOV64mi32;
       break;
     }
@@ -312,12 +328,11 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                     unsigned &ResultReg) {
   unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                            Src, /*TODO: Kill=*/false);
-
-  if (RR != 0) {
-    ResultReg = RR;
-    return true;
-  } else
+  if (RR == 0)
     return false;
+
+  ResultReg = RR;
+  return true;
 }
 
 /// X86SelectAddress - Attempt to fill in an address from the given value.
@@ -535,13 +550,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
 
       if (TLI.getPointerTy() == MVT::i64) {
         Opc = X86::MOV64rm;
-        RC = X86::GR64RegisterClass;
+        RC = &X86::GR64RegClass;
 
         if (Subtarget->isPICStyleRIPRel())
           StubAM.Base.Reg = X86::RIP;
       } else {
         Opc = X86::MOV32rm;
-        RC = X86::GR32RegisterClass;
+        RC = &X86::GR32RegClass;
       }
 
       LoadReg = createResultReg(RC);
@@ -671,7 +686,14 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
 /// X86SelectStore - Select and emit code to implement store instructions.
 bool X86FastISel::X86SelectStore(const Instruction *I) {
   // Atomic stores need special handling.
-  if (cast<StoreInst>(I)->isAtomic())
+  const StoreInst *S = cast<StoreInst>(I);
+
+  if (S->isAtomic())
+    return false;
+
+  unsigned SABIAlignment =
+    TD.getABITypeAlignment(S->getValueOperand()->getType());
+  if (S->getAlignment() != 0 && S->getAlignment() < SABIAlignment)
     return false;
 
   MVT VT;
@@ -689,6 +711,8 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
 bool X86FastISel::X86SelectRet(const Instruction *I) {
   const ReturnInst *Ret = cast<ReturnInst>(I);
   const Function &F = *I->getParent()->getParent();
+  const X86MachineFunctionInfo *X86MFInfo =
+      FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
 
   if (!FuncInfo.CanLowerReturn)
     return false;
@@ -703,28 +727,29 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     return false;
 
   // Don't handle popping bytes on return for now.
-  if (FuncInfo.MF->getInfo<X86MachineFunctionInfo>()
-        ->getBytesToPopOnReturn() != 0)
-    return 0;
+  if (X86MFInfo->getBytesToPopOnReturn() != 0)
+    return false;
 
   // fastcc with -tailcallopt is intended to provide a guaranteed
   // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+  if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
     return false;
 
   // Let SDISel handle vararg functions.
   if (F.isVarArg())
     return false;
 
+  // Build a list of return value registers.
+  SmallVector<unsigned, 4> RetRegs;
+
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                  Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
-		   I->getContext());
+                   I->getContext());
     CCInfo.AnalyzeReturn(Outs, RetCC_X86);
 
     const Value *RV = Ret->getOperand(0);
@@ -784,12 +809,30 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
             DstReg).addReg(SrcReg);
 
-    // Mark the register as live out of the function.
-    MRI.addLiveOut(VA.getLocReg());
+    // Add register to return instruction.
+    RetRegs.push_back(VA.getLocReg());
+  }
+
+  // The x86-64 ABI for returning structs by value requires that we copy
+  // the sret argument into %rax for the return. We saved the argument into
+  // a virtual register in the entry block, so now we copy the value out
+  // and into %rax. We also do the same with %eax for Win32.
+  if (F.hasStructRetAttr() &&
+      (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
+    unsigned Reg = X86MFInfo->getSRetReturnReg();
+    assert(Reg &&
+           "SRetReturnReg should have been set in LowerFormalArguments()!");
+    unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+            RetReg).addReg(Reg);
+    RetRegs.push_back(RetReg);
   }
 
   // Now emit the RET.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+  MachineInstrBuilder MIB =
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
+    MIB.addReg(RetRegs[i], RegState::Implicit);
   return true;
 }
@@ -818,8 +861,8 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
 
 static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
   bool HasAVX = Subtarget->hasAVX();
-  bool X86ScalarSSEf32 = HasAVX || Subtarget->hasSSE1();
-  bool X86ScalarSSEf64 = HasAVX || Subtarget->hasSSE2();
+  bool X86ScalarSSEf32 = Subtarget->hasSSE1();
+  bool X86ScalarSSEf64 = Subtarget->hasSSE2();
 
   switch (VT.getSimpleVT().SimpleTy) {
   default: return 0;
@@ -1239,7 +1282,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
   if (V->getType()->isFloatTy()) {
     unsigned OpReg = getRegForValue(V);
     if (OpReg == 0) return false;
-    unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
+    unsigned ResultReg = createResultReg(&X86::FR64RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
             TII.get(X86::CVTSS2SDrr), ResultReg)
       .addReg(OpReg);
@@ -1258,7 +1301,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
     if (V->getType()->isDoubleTy()) {
       unsigned OpReg = getRegForValue(V);
       if (OpReg == 0) return false;
-      unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
+      unsigned ResultReg = createResultReg(&X86::FR32RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
               TII.get(X86::CVTSD2SSrr), ResultReg)
         .addReg(OpReg);
@@ -1295,8 +1338,9 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
   if (!Subtarget->is64Bit()) {
     // If we're on x86-32; we can't extract an i8 from a general register.
     // First issue a copy to GR16_ABCD or GR32_ABCD.
-    const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
-      ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
+    const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
+      (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
+      (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
     unsigned CopyReg = createResultReg(CopyRC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
             CopyReg).addReg(InputReg);
@@ -1337,7 +1381,6 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
     else if (Len >= 2)
       VT = MVT::i16;
     else {
-      assert(Len == 1);
       VT = MVT::i8;
     }
 
@@ -1404,7 +1447,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
     return DoSelectCall(&I, "memset");
   }
  case Intrinsic::stackprotector: {
-    // Emit code inline code to store the stack guard onto the stack.
+    // Emit code to store the stack guard onto the stack.
     EVT PtrTy = TLI.getPointerTy();
 
     const Value *Op1 = I.getArgOperand(0); // The guard's value.
@@ -1465,7 +1508,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
       return false;
 
     // The call to CreateRegs builds two sequential registers, to store the
-    // both the the returned values.
+    // both the returned values.
     unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
       .addReg(Reg1).addReg(Reg2);
@@ -1481,6 +1524,81 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
   }
 }
 
+bool X86FastISel::FastLowerArguments() {
+  if (!FuncInfo.CanLowerReturn)
+    return false;
+
+  if (Subtarget->isTargetWindows())
+    return false;
+
+  const Function *F = FuncInfo.Fn;
+  if (F->isVarArg())
+    return false;
+
+  CallingConv::ID CC = F->getCallingConv();
+  if (CC != CallingConv::C)
+    return false;
+
+  if (!Subtarget->is64Bit())
+    return false;
+
+  // Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments.
+  unsigned Idx = 1;
+  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+       I != E; ++I, ++Idx) {
+    if (Idx > 6)
+      return false;
+
+    if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
+        F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
+        F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+        F->getAttributes().hasAttribute(Idx, Attribute::Nest))
+      return false;
+
+    Type *ArgTy = I->getType();
+    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
+      return false;
+
+    EVT ArgVT = TLI.getValueType(ArgTy);
+    if (!ArgVT.isSimple()) return false;
+    switch (ArgVT.getSimpleVT().SimpleTy) {
+    case MVT::i32:
+    case MVT::i64:
+      break;
+    default:
+      return false;
+    }
+  }
+
+  static const uint16_t GPR32ArgRegs[] = {
+    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
+  };
+  static const uint16_t GPR64ArgRegs[] = {
+    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
+  };
+
+  Idx = 0;
+  const TargetRegisterClass *RC32 = TLI.getRegClassFor(MVT::i32);
+  const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64);
+  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+       I != E; ++I, ++Idx) {
+    if (I->use_empty())
+      continue;
+    bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32;
+    const TargetRegisterClass *RC = is32Bit ? RC32 : RC64;
+    unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
+    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
+    // Without this, EmitLiveInCopies may eliminate the livein if its only
+    // use is a bitcast (which isn't turned into an instruction).
+    unsigned ResultReg = createResultReg(RC);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+            ResultReg).addReg(DstReg, getKillRegState(true));
+    UpdateValueMap(I, ResultReg);
+  }
+  return true;
+}
+
 bool X86FastISel::X86SelectCall(const Instruction *I) {
   const CallInst *CI = cast<CallInst>(I);
   const Value *Callee = CI->getCalledValue();
@@ -1493,9 +1611,29 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
     return X86VisitIntrinsicCall(*II);
 
+  // Allow SelectionDAG isel to handle tail calls.
+  if (cast<CallInst>(I)->isTailCall())
+    return false;
+
   return DoSelectCall(I, 0);
 }
 
+static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget,
+                                           const ImmutableCallSite &CS) {
+  if (Subtarget.is64Bit())
+    return 0;
+  if (Subtarget.isTargetWindows())
+    return 0;
+  CallingConv::ID CC = CS.getCallingConv();
+  if (CC == CallingConv::Fast || CC == CallingConv::GHC)
+    return 0;
+  if (!CS.paramHasAttr(1, Attribute::StructRet))
+    return 0;
+  if (CS.paramHasAttr(1, Attribute::InReg))
+    return 0;
+  return 4;
+}
+
 // Select either a call, or an llvm.memcpy/memmove/memset intrinsic
 bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   const CallInst *CI = cast<CallInst>(I);
@@ -1510,7 +1648,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
 
   // fastcc with -tailcallopt is intended to provide a guaranteed
   // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+  if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
     return false;
 
   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
@@ -1524,17 +1662,15 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
 
   // Fast-isel doesn't know about callee-pop yet.
   if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg,
-                       GuaranteedTailCallOpt))
+                       TM.Options.GuaranteedTailCallOpt))
     return false;
 
   // Check whether the function can return without sret-demotion.
   SmallVector<ISD::OutputArg, 4> Outs;
-  SmallVector<uint64_t, 4> Offsets;
-  GetReturnInfo(I->getType(), CS.getAttributes().getRetAttributes(),
-                Outs, TLI, &Offsets);
+  GetReturnInfo(I->getType(), CS.getAttributes(), Outs, TLI);
   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-					   *FuncInfo.MF, FTy->isVarArg(),
-					   Outs, FTy->getContext());
+                                           *FuncInfo.MF, FTy->isVarArg(),
+                                           Outs, FTy->getContext());
   if (!CanLowerReturn)
     return false;
@@ -1557,10 +1693,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   SmallVector<unsigned, 8> Args;
   SmallVector<MVT, 8> ArgVTs;
   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
-  Args.reserve(CS.arg_size());
-  ArgVals.reserve(CS.arg_size());
-  ArgVTs.reserve(CS.arg_size());
-  ArgFlags.reserve(CS.arg_size());
+  unsigned arg_size = CS.arg_size();
+  Args.reserve(arg_size);
+  ArgVals.reserve(arg_size);
+  ArgVTs.reserve(arg_size);
+  ArgFlags.reserve(arg_size);
   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
        i != e; ++i) {
     // If we're lowering a mem intrinsic instead of a regular call, skip the
@@ -1647,7 +1784,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
-		 I->getParent()->getContext());
+                 I->getParent()->getContext());
 
   // Allocate shadow area for Win64
   if (Subtarget->isTargetWin64())
@@ -1673,7 +1810,6 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
 
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
-    default: llvm_unreachable("Unknown loc info!");
     case CCValAssign::Full: break;
     case CCValAssign::SExt: {
       assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
@@ -1717,6 +1853,14 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
       ArgVT = VA.getLocVT();
       break;
     }
+    case CCValAssign::VExt:
+      // VExt has not been implemented, so this should be impossible to reach
+      // for now. However, fallback to Selection DAG isel once implemented.
+      return false;
+    case CCValAssign::Indirect:
+      // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
+      // support this.
+      return false;
     }
 
     if (VA.isRegLoc()) {
@@ -1726,7 +1870,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     } else {
       unsigned LocMemOffset = VA.getLocMemOffset();
       X86AddressMode AM;
-      AM.Base.Reg = StackPtr;
+      AM.Base.Reg = RegInfo->getStackRegister();
      AM.Disp = LocMemOffset;
       const Value *ArgVal = ArgVals[VA.getValNo()];
       ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()];
@@ -1738,11 +1882,13 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
         assert(Res && "memcpy length already checked!"); (void)Res;
       } else if (isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal)) {
         // If this is a really simple value, emit this with the Value* version
-        //of X86FastEmitStore.  If it isn't simple, we don't want to do this,
+        // of X86FastEmitStore.  If it isn't simple, we don't want to do this,
         // as it can cause us to reevaluate the argument.
-        X86FastEmitStore(ArgVT, ArgVal, AM);
+        if (!X86FastEmitStore(ArgVT, ArgVal, AM))
+          return false;
       } else {
-        X86FastEmitStore(ArgVT, Arg, AM);
+        if (!X86FastEmitStore(ArgVT, Arg, AM))
+          return false;
       }
     }
   }
@@ -1757,7 +1903,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
 
   if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) {
     // Count the number of XMM registers allocated.
-    static const unsigned XMMArgRegs[] = {
+    static const uint16_t XMMArgRegs[] = {
       X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
     };
@@ -1771,9 +1917,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   if (CalleeOp) {
     // Register-indirect call.
     unsigned CallOpc;
-    if (Subtarget->isTargetWin64())
-      CallOpc = X86::WINCALL64r;
-    else if (Subtarget->is64Bit())
+    if (Subtarget->is64Bit())
       CallOpc = X86::CALL64r;
     else
       CallOpc = X86::CALL32r;
@@ -1784,9 +1928,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     // Direct call.
     assert(GV && "Not a direct call");
     unsigned CallOpc;
-    if (Subtarget->isTargetWin64())
-      CallOpc = X86::WINCALL64pcrel32;
-    else if (Subtarget->is64Bit())
+    if (Subtarget->is64Bit())
       CallOpc = X86::CALL64pcrel32;
     else
       CallOpc = X86::CALLpcrel32;
@@ -1820,22 +1962,24 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     MIB.addGlobalAddress(GV, 0, OpFlags);
   }
 
+  // Add a register mask with the call-preserved registers.
+  // Proper defs for return values will be added by setPhysRegsDeadExcept().
+  MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+
   // Add an implicit use GOT pointer in EBX.
   if (Subtarget->isPICStyleGOT())
-    MIB.addReg(X86::EBX);
+    MIB.addReg(X86::EBX, RegState::Implicit);
 
   if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
-    MIB.addReg(X86::AL);
+    MIB.addReg(X86::AL, RegState::Implicit);
 
   // Add implicit physical register uses to the call.
   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
-    MIB.addReg(RegArgs[i]);
+    MIB.addReg(RegArgs[i], RegState::Implicit);
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  unsigned NumBytesCallee = 0;
-  if (!Subtarget->is64Bit() && CS.paramHasAttr(1, Attribute::StructRet))
-    NumBytesCallee = 4;
+  const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
     .addImm(NumBytes).addImm(NumBytesCallee);
@@ -1846,11 +1990,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   ComputeValueVTs(TLI, I->getType(), RetTys);
   for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
     EVT VT = RetTys[i];
-    EVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
+    MVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
     unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT);
     for (unsigned j = 0; j != NumRegs; ++j) {
       ISD::InputArg MyFlags;
-      MyFlags.VT = RegisterVT.getSimpleVT();
+      MyFlags.VT = RegisterVT;
       MyFlags.Used = !CS.getInstruction()->use_empty();
       if (CS.paramHasAttr(0, Attribute::SExt))
         MyFlags.Flags.setSExt();
@@ -1866,7 +2010,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   SmallVector<unsigned, 8> UsedRegs;
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
-		    I->getParent()->getContext());
+                    I->getParent()->getContext());
   unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
   CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1880,7 +2024,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
          RVLocs[i].getLocReg() == X86::ST1)) {
       if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
         CopyVT = MVT::f80;
-        CopyReg = createResultReg(X86::RFP80RegisterClass);
+        CopyReg = createResultReg(&X86::RFP80RegClass);
       }
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
               CopyReg);
@@ -1969,51 +2113,55 @@ X86FastISel::TargetSelectInstruction(const Instruction *I)  {
 unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
   MVT VT;
   if (!isTypeLegal(C->getType(), VT))
-    return false;
+    return 0;
+
+  // Can't handle alternate code models yet.
+  if (TM.getCodeModel() != CodeModel::Small)
+    return 0;
 
   // Get opcode and regclass of the output for the given load instruction.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
   switch (VT.SimpleTy) {
-  default: return false;
+  default: return 0;
   case MVT::i8:
     Opc = X86::MOV8rm;
-    RC = X86::GR8RegisterClass;
+    RC = &X86::GR8RegClass;
     break;
   case MVT::i16:
     Opc = X86::MOV16rm;
-    RC = X86::GR16RegisterClass;
+    RC = &X86::GR16RegClass;
     break;
   case MVT::i32:
     Opc = X86::MOV32rm;
-    RC = X86::GR32RegisterClass;
+    RC = &X86::GR32RegClass;
     break;
   case MVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
-    RC = X86::GR64RegisterClass;
+    RC = &X86::GR64RegClass;
     break;
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
-      RC = X86::FR32RegisterClass;
+      RC = &X86::FR32RegClass;
     } else {
       Opc = X86::LD_Fp32m;
-      RC = X86::RFP32RegisterClass;
+      RC = &X86::RFP32RegClass;
     }
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
-      RC = X86::FR64RegisterClass;
+      RC = &X86::FR64RegClass;
     } else {
      Opc = X86::LD_Fp64m;
-      RC = X86::RFP64RegisterClass;
+      RC = &X86::RFP64RegClass;
     }
     break;
   case MVT::f80:
     // No f80 support yet.
-    return false;
+    return 0;
   }
 
   // Materialize addresses with LEA instructions.
@@ -2081,7 +2229,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   if (!X86SelectAddress(C, AM))
     return 0;
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
-  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+  const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg), AM);
@@ -2091,34 +2239,34 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
 unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
   MVT VT;
   if (!isTypeLegal(CF->getType(), VT))
-    return false;
+    return 0;
 
   // Get opcode and regclass for the given zero.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
   switch (VT.SimpleTy) {
-  default: return false;
-  case MVT::f32:
-    if (X86ScalarSSEf32) {
-      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SS : X86::FsFLD0SS;
-      RC = X86::FR32RegisterClass;
-    } else {
-      Opc = X86::LD_Fp032;
-      RC = X86::RFP32RegisterClass;
-    }
-    break;
-  case MVT::f64:
-    if (X86ScalarSSEf64) {
-      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SD : X86::FsFLD0SD;
-      RC = X86::FR64RegisterClass;
-    } else {
-      Opc = X86::LD_Fp064;
-      RC = X86::RFP64RegisterClass;
-    }
-    break;
-  case MVT::f80:
-    // No f80 support yet.
-    return false;
+  default: return 0;
+  case MVT::f32:
+    if (X86ScalarSSEf32) {
+      Opc = X86::FsFLD0SS;
+      RC = &X86::FR32RegClass;
+    } else {
+      Opc = X86::LD_Fp032;
+      RC = &X86::RFP32RegClass;
+    }
+    break;
+  case MVT::f64:
+    if (X86ScalarSSEf64) {
+      Opc = X86::FsFLD0SD;
+      RC = &X86::FR64RegClass;
+    } else {
+      Opc = X86::LD_Fp064;
+      RC = &X86::RFP64RegClass;
+    }
+    break;
+  case MVT::f80:
+    // No f80 support yet.
+    return 0;
   }
 
   unsigned ResultReg = createResultReg(RC);
@@ -2137,7 +2285,7 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
   if (!X86SelectAddress(LI->getOperand(0), AM))
     return false;
 
-  X86InstrInfo &XII = (X86InstrInfo&)TII;
+  const X86InstrInfo &XII = (const X86InstrInfo&)TII;
 
   unsigned Size = TD.getTypeAllocSize(LI->getType());
   unsigned Alignment = LI->getAlignment();
@@ -2156,7 +2304,8 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
 
 
 namespace llvm {
-  llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
-    return new X86FastISel(funcInfo);
+  FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
+                                const TargetLibraryInfo *libInfo) {
+    return new X86FastISel(funcInfo, libInfo);
  }
 }
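
Note on the final hunk: X86::createFastISel now takes a TargetLibraryInfo pointer alongside the FunctionLoweringInfo, and the X86FastISel constructor forwards both to the FastISel base class. As an illustration only, a minimal sketch of the matching caller side — modeled on the X86TargetLowering::createFastISel override in X86ISelLowering.cpp, which is assumed context and not shown by this diff:

  // Sketch (not part of the patch above): the X86 target lowering forwards
  // the per-function state and the library-call information to the FastISel
  // factory, so fast instruction selection can consult TargetLibraryInfo
  // when lowering calls to known library functions.
  FastISel *
  X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                    const TargetLibraryInfo *libInfo) const {
    return X86::createFastISel(funcInfo, libInfo);
  }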