X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86FastISel.cpp;h=ee6643336fd4f092c0126a72faec4af8959bb486;hb=d9e3385ced2dc887e2fe8e1c071bd2611e4d3ede;hp=15553c25110cb0d878ea9536ad4a78ae26b60d9d;hpb=34dcc6fadca0a1117cdbd0e9b35c991a55b6e556;p=oota-llvm.git diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 15553c25110..ee6643336fd 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -23,7 +23,9 @@ #include "llvm/GlobalVariable.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" +#include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/FastISel.h" +#include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -52,20 +54,7 @@ class X86FastISel : public FastISel { bool X86ScalarSSEf32; public: - explicit X86FastISel(MachineFunction &mf, - DenseMap &vm, - DenseMap &bm, - DenseMap &am, - std::vector > &pn -#ifndef NDEBUG - , SmallSet &cil -#endif - ) - : FastISel(mf, vm, bm, am, pn -#ifndef NDEBUG - , cil -#endif - ) { + explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) { Subtarget = &TM.getSubtarget(); StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; X86ScalarSSEf64 = Subtarget->hasSSE2(); @@ -74,6 +63,13 @@ public: virtual bool TargetSelectInstruction(const Instruction *I); + /// TryToFoldLoad - The specified machine instr operand is a vreg, and that + /// vreg is being provided by the specified load instruction. If possible, + /// try to fold the load as an operand to the instruction, returning true if + /// possible. + virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI); + #include "X86GenFastISel.inc" private: @@ -96,6 +92,8 @@ private: bool X86SelectStore(const Instruction *I); + bool X86SelectRet(const Instruction *I); + bool X86SelectCmp(const Instruction *I); bool X86SelectZExt(const Instruction *I); @@ -116,8 +114,6 @@ private: bool X86VisitIntrinsicCall(const IntrinsicInst &I); bool X86SelectCall(const Instruction *I); - CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false); - const X86InstrInfo *getInstrInfo() const { return getTargetMachine()->getInstrInfo(); } @@ -136,17 +132,18 @@ private: (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 } - bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false); + bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false); }; } // end anonymous namespace. -bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) { - VT = TLI.getValueType(Ty, /*HandleUnknown=*/true); - if (VT == MVT::Other || !VT.isSimple()) +bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) { + EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true); + if (evt == MVT::Other || !evt.isSimple()) // Unhandled type. Halt "fast" selection and bail. return false; - + + VT = evt.getSimpleVT(); // For now, require SSE/SSE2 for performing floating-point operations, // since x87 requires additional work. if (VT == MVT::f64 && !X86ScalarSSEf64) @@ -165,29 +162,6 @@ bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) { #include "X86GenCallingConv.inc" -/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling -/// convention. 
-CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC, - bool isTaillCall) { - if (Subtarget->is64Bit()) { - if (CC == CallingConv::GHC) - return CC_X86_64_GHC; - else if (Subtarget->isTargetWin64()) - return CC_X86_Win64_C; - else - return CC_X86_64_C; - } - - if (CC == CallingConv::X86_FastCall) - return CC_X86_32_FastCall; - else if (CC == CallingConv::Fast) - return CC_X86_32_FastCC; - else if (CC == CallingConv::GHC) - return CC_X86_32_GHC; - else - return CC_X86_32_C; -} - /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT. /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV. /// Return true and the result register by reference if it is possible. @@ -240,7 +214,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, } ResultReg = createResultReg(RC); - addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc), ResultReg), AM); return true; } @@ -259,7 +234,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, case MVT::i1: { // Mask out all but lowest bit. unsigned AndResult = createResultReg(X86::GR8RegisterClass); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1); Val = AndResult; } @@ -276,7 +251,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, break; } - addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc)), AM).addReg(Val); return true; } @@ -304,7 +280,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, } if (Opc) { - addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM) + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc)), AM) .addImm(Signed ? (uint64_t) CI->getSExtValue() : CI->getZExtValue()); return true; @@ -324,7 +301,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, unsigned &ResultReg) { - unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src); + unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, + Src, /*TODO: Kill=*/false); if (RR != 0) { ResultReg = RR; @@ -339,13 +317,25 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { const User *U = NULL; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(V)) { - Opcode = I->getOpcode(); - U = I; + // Don't walk into other basic blocks; it's possible we haven't + // visited them yet, so the instructions may not yet be assigned + // virtual registers. + if (FuncInfo.StaticAllocaMap.count(static_cast(V)) || + FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { + Opcode = I->getOpcode(); + U = I; + } } else if (const ConstantExpr *C = dyn_cast(V)) { Opcode = C->getOpcode(); U = C; } + if (const PointerType *Ty = dyn_cast(V->getType())) + if (Ty->getAddressSpace() > 255) + // Fast instruction selection doesn't support the special + // address spaces. + return false; + switch (Opcode) { default: break; case Instruction::BitCast: @@ -367,8 +357,9 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { case Instruction::Alloca: { // Do static allocas. 
const AllocaInst *A = cast(V); - DenseMap::iterator SI = StaticAllocaMap.find(A); - if (SI != StaticAllocaMap.end()) { + DenseMap::iterator SI = + FuncInfo.StaticAllocaMap.find(A); + if (SI != FuncInfo.StaticAllocaMap.end()) { AM.BaseType = X86AddressMode::FrameIndexBase; AM.Base.FrameIndex = SI->second; return true; @@ -408,20 +399,33 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { Disp += SL->getElementOffset(Idx); } else { uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); - if (const ConstantInt *CI = dyn_cast(Op)) { - // Constant-offset addressing. - Disp += CI->getSExtValue() * S; - } else if (IndexReg == 0 && - (!AM.GV || !Subtarget->isPICStyleRIPRel()) && - (S == 1 || S == 2 || S == 4 || S == 8)) { - // Scaled-index addressing. - Scale = S; - IndexReg = getRegForGEPIndex(Op); - if (IndexReg == 0) - return false; - } else - // Unsupported. - goto unsupported_gep; + SmallVector Worklist; + Worklist.push_back(Op); + do { + Op = Worklist.pop_back_val(); + if (const ConstantInt *CI = dyn_cast(Op)) { + // Constant-offset addressing. + Disp += CI->getSExtValue() * S; + } else if (isa(Op) && + isa(cast(Op)->getOperand(1))) { + // An add with a constant operand. Fold the constant. + ConstantInt *CI = + cast(cast(Op)->getOperand(1)); + Disp += CI->getSExtValue() * S; + // Add the other operand back to the work list. + Worklist.push_back(cast(Op)->getOperand(0)); + } else if (IndexReg == 0 && + (!AM.GV || !Subtarget->isPICStyleRIPRel()) && + (S == 1 || S == 2 || S == 4 || S == 8)) { + // Scaled-index addressing. + Scale = S; + IndexReg = getRegForGEPIndex(Op).first; + if (IndexReg == 0) + return false; + } else + // Unsupported. + goto unsupported_gep; + } while (!Worklist.empty()); } } // Check for displacement overflow. @@ -470,7 +474,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { // If this reference is relative to the pic base, set it now. if (isGlobalRelativeToPICBase(GVFlags)) { // FIXME: How do we know Base.Reg is free?? - AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF); + AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); } // Unless the ABI requires an extra load, return a direct reference to @@ -501,6 +505,9 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { StubAM.GV = GV; StubAM.GVOpFlags = GVFlags; + // Prepare for inserting code in the local-value area. + SavePoint SaveInsertPt = enterLocalValueArea(); + if (TLI.getPointerTy() == MVT::i64) { Opc = X86::MOV64rm; RC = X86::GR64RegisterClass; @@ -513,8 +520,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { } LoadReg = createResultReg(RC); - addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM); - + MachineInstrBuilder LoadMI = + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg); + addFullAddress(LoadMI, StubAM); + + // Ok, back to normal mode. + leaveLocalValueArea(SaveInsertPt); + // Prevent loading GV stub multiple times in same MBB. LocalValueMap[V] = LoadReg; } @@ -628,7 +640,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { /// X86SelectStore - Select and emit code to implement store instructions. 
bool X86FastISel::X86SelectStore(const Instruction *I) { - EVT VT; + MVT VT; if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true)) return false; @@ -639,10 +651,97 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { return X86FastEmitStore(VT, I->getOperand(0), AM); } +/// X86SelectRet - Select and emit code to implement ret instructions. +bool X86FastISel::X86SelectRet(const Instruction *I) { + const ReturnInst *Ret = cast(I); + const Function &F = *I->getParent()->getParent(); + + if (!FuncInfo.CanLowerReturn) + return false; + + CallingConv::ID CC = F.getCallingConv(); + if (CC != CallingConv::C && + CC != CallingConv::Fast && + CC != CallingConv::X86_FastCall) + return false; + + if (Subtarget->isTargetWin64()) + return false; + + // Don't handle popping bytes on return for now. + if (FuncInfo.MF->getInfo() + ->getBytesToPopOnReturn() != 0) + return 0; + + // fastcc with -tailcallopt is intended to provide a guaranteed + // tail call optimization. Fastisel doesn't know how to do that. + if (CC == CallingConv::Fast && GuaranteedTailCallOpt) + return false; + + // Let SDISel handle vararg functions. + if (F.isVarArg()) + return false; + + if (Ret->getNumOperands() > 0) { + SmallVector Outs; + GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), + Outs, TLI); + + // Analyze operands of the call, assigning locations to each operand. + SmallVector ValLocs; + CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext()); + CCInfo.AnalyzeReturn(Outs, RetCC_X86); + + const Value *RV = Ret->getOperand(0); + unsigned Reg = getRegForValue(RV); + if (Reg == 0) + return false; + + // Only handle a single return value for now. + if (ValLocs.size() != 1) + return false; + + CCValAssign &VA = ValLocs[0]; + + // Don't bother handling odd stuff for now. + if (VA.getLocInfo() != CCValAssign::Full) + return false; + // Only handle register returns for now. + if (!VA.isRegLoc()) + return false; + // TODO: For now, don't try to handle cases where getLocInfo() + // says Full but the types don't match. + if (TLI.getValueType(RV->getType()) != VA.getValVT()) + return false; + + // The calling-convention tables for x87 returns don't tell + // the whole story. + if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) + return false; + + // Make the copy. + unsigned SrcReg = Reg + VA.getValNo(); + unsigned DstReg = VA.getLocReg(); + const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); + // Avoid a cross-class copy. This is very unlikely. + if (!SrcRC->contains(DstReg)) + return false; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + DstReg).addReg(SrcReg); + + // Mark the register as live out of the function. + MRI.addLiveOut(VA.getLocReg()); + } + + // Now emit the RET. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET)); + return true; +} + /// X86SelectLoad - Select and emit code to implement load instructions. 
/// bool X86FastISel::X86SelectLoad(const Instruction *I) { - EVT VT; + MVT VT; if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true)) return false; @@ -658,15 +757,15 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) { return false; } -static unsigned X86ChooseCmpOpcode(EVT VT) { +static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) { switch (VT.getSimpleVT().SimpleTy) { default: return 0; case MVT::i8: return X86::CMP8rr; case MVT::i16: return X86::CMP16rr; case MVT::i32: return X86::CMP32rr; case MVT::i64: return X86::CMP64rr; - case MVT::f32: return X86::UCOMISSrr; - case MVT::f64: return X86::UCOMISDrr; + case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0; + case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0; } } @@ -703,18 +802,21 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, // CMPri, otherwise use CMPrr. if (const ConstantInt *Op1C = dyn_cast(Op1)) { if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { - BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg) - .addImm(Op1C->getSExtValue()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc)) + .addReg(Op0Reg) + .addImm(Op1C->getSExtValue()); return true; } } - unsigned CompareOpc = X86ChooseCmpOpcode(VT); + unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget); if (CompareOpc == 0) return false; unsigned Op1Reg = getRegForValue(Op1); if (Op1Reg == 0) return false; - BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc)) + .addReg(Op0Reg) + .addReg(Op1Reg); return true; } @@ -722,7 +824,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, bool X86FastISel::X86SelectCmp(const Instruction *I) { const CmpInst *CI = cast(I); - EVT VT; + MVT VT; if (!isTypeLegal(I->getOperand(0)->getType(), VT)) return false; @@ -736,9 +838,10 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { unsigned EReg = createResultReg(&X86::GR8RegClass); unsigned NPReg = createResultReg(&X86::GR8RegClass); - BuildMI(MBB, DL, TII.get(X86::SETEr), EReg); - BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETNPr), NPReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg); UpdateValueMap(I, ResultReg); return true; @@ -749,9 +852,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { unsigned NEReg = createResultReg(&X86::GR8RegClass); unsigned PReg = createResultReg(&X86::GR8RegClass); - BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg); - BuildMI(MBB, DL, TII.get(X86::SETPr), PReg); - BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETNEr), NEReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETPr), PReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::OR8rr), ResultReg) + .addReg(PReg).addReg(NEReg); UpdateValueMap(I, ResultReg); return true; } @@ -790,7 +897,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { if (!X86FastEmitCompare(Op0, Op1, VT)) return false; - BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg); UpdateValueMap(I, ResultReg); return true; } @@ -802,7 +909,7 @@ bool 
X86FastISel::X86SelectZExt(const Instruction *I) { unsigned ResultReg = getRegForValue(I->getOperand(0)); if (ResultReg == 0) return false; // Set the high bits to zero. - ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg); + ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); if (ResultReg == 0) return false; UpdateValueMap(I, ResultReg); return true; @@ -816,17 +923,19 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { // Unconditional branches are selected by tablegen-generated code. // Handle a conditional branch. const BranchInst *BI = cast(I); - MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)]; - MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)]; + MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; + MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; - // Fold the common case of a conditional branch with a comparison. + // Fold the common case of a conditional branch with a comparison + // in the same block (values defined on other blocks may not have + // initialized registers). if (const CmpInst *CI = dyn_cast(BI->getCondition())) { - if (CI->hasOneUse()) { + if (CI->hasOneUse() && CI->getParent() == I->getParent()) { EVT VT = TLI.getValueType(CI->getOperand(0)->getType()); // Try to take advantage of fallthrough opportunities. CmpInst::Predicate Predicate = CI->getPredicate(); - if (MBB->isLayoutSuccessor(TrueMBB)) { + if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) { std::swap(TrueMBB, FalseMBB); Predicate = CmpInst::getInversePredicate(Predicate); } @@ -875,16 +984,18 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (!X86FastEmitCompare(Op0, Op1, VT)) return false; - BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc)) + .addMBB(TrueMBB); if (Predicate == CmpInst::FCMP_UNE) { // X86 requires a second branch to handle UNE (and OEQ, // which is mapped to UNE above). - BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4)) + .addMBB(TrueMBB); } - FastEmitBranch(FalseMBB); - MBB->addSuccessor(TrueMBB); + FastEmitBranch(FalseMBB, DL); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } } else if (ExtractValueInst *EI = @@ -907,17 +1018,16 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow || CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) { const MachineInstr *SetMI = 0; - unsigned Reg = lookUpRegForValue(EI); + unsigned Reg = getRegForValue(EI); for (MachineBasicBlock::const_reverse_iterator - RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) { + RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend(); + RI != RE; ++RI) { const MachineInstr &MI = *RI; - if (MI.modifiesRegister(Reg)) { - unsigned Src, Dst, SrcSR, DstSR; - - if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) { - Reg = Src; + if (MI.definesRegister(Reg)) { + if (MI.isCopy()) { + Reg = MI.getOperand(1).getReg(); continue; } @@ -935,11 +1045,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { unsigned OpCode = SetMI->getOpcode(); if (OpCode == X86::SETOr || OpCode == X86::SETBr) { - BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? - X86::JO_4 : X86::JB_4)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpCode == X86::SETOr ? 
X86::JO_4 : X86::JB_4)) .addMBB(TrueMBB); - FastEmitBranch(FalseMBB); - MBB->addSuccessor(TrueMBB); + FastEmitBranch(FalseMBB, DL); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } } @@ -951,10 +1061,12 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { unsigned OpReg = getRegForValue(BI->getCondition()); if (OpReg == 0) return false; - BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg); - BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB); - FastEmitBranch(FalseMBB); - MBB->addSuccessor(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr)) + .addReg(OpReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4)) + .addMBB(TrueMBB); + FastEmitBranch(FalseMBB, DL); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } @@ -1001,8 +1113,8 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { return false; } - EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true); - if (VT == MVT::Other || !isTypeLegal(I->getType(), VT)) + MVT VT; + if (!isTypeLegal(I->getType(), VT)) return false; unsigned Op0Reg = getRegForValue(I->getOperand(0)); @@ -1011,7 +1123,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { // Fold immediate in shl(x,3). if (const ConstantInt *CI = dyn_cast(I->getOperand(1))) { unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(OpImm), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm), ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff); UpdateValueMap(I, ResultReg); return true; @@ -1019,35 +1131,40 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { unsigned Op1Reg = getRegForValue(I->getOperand(1)); if (Op1Reg == 0) return false; - TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + CReg).addReg(Op1Reg); // The shift instruction uses X86::CL. If we defined a super-register - // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what - // we're doing here. + // of X86::CL, emit a subreg KILL to precisely describe what we're doing here. if (CReg != X86::CL) - BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL) - .addReg(CReg).addImm(X86::SUBREG_8BIT); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::KILL), X86::CL) + .addReg(CReg, RegState::Kill); unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg) + .addReg(Op0Reg); UpdateValueMap(I, ResultReg); return true; } bool X86FastISel::X86SelectSelect(const Instruction *I) { - EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true); - if (VT == MVT::Other || !isTypeLegal(I->getType(), VT)) + MVT VT; + if (!isTypeLegal(I->getType(), VT)) return false; + // We only use cmov here, if we don't have a cmov instruction bail. 
+ if (!Subtarget->hasCMov()) return false; + unsigned Opc = 0; const TargetRegisterClass *RC = NULL; - if (VT.getSimpleVT() == MVT::i16) { + if (VT == MVT::i16) { Opc = X86::CMOVE16rr; RC = &X86::GR16RegClass; - } else if (VT.getSimpleVT() == MVT::i32) { + } else if (VT == MVT::i32) { Opc = X86::CMOVE32rr; RC = &X86::GR32RegClass; - } else if (VT.getSimpleVT() == MVT::i64) { + } else if (VT == MVT::i64) { Opc = X86::CMOVE64rr; RC = &X86::GR64RegClass; } else { @@ -1061,9 +1178,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { unsigned Op2Reg = getRegForValue(I->getOperand(2)); if (Op2Reg == 0) return false; - BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr)) + .addReg(Op0Reg).addReg(Op0Reg); unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) + .addReg(Op1Reg).addReg(Op2Reg); UpdateValueMap(I, ResultReg); return true; } @@ -1077,7 +1196,9 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) { unsigned OpReg = getRegForValue(V); if (OpReg == 0) return false; unsigned ResultReg = createResultReg(X86::FR64RegisterClass); - BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::CVTSS2SDrr), ResultReg) + .addReg(OpReg); UpdateValueMap(I, ResultReg); return true; } @@ -1094,7 +1215,9 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) { unsigned OpReg = getRegForValue(V); if (OpReg == 0) return false; unsigned ResultReg = createResultReg(X86::FR32RegisterClass); - BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::CVTSD2SSrr), ResultReg) + .addReg(OpReg); UpdateValueMap(I, ResultReg); return true; } @@ -1125,15 +1248,16 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) { return false; // First issue a copy to GR16_ABCD or GR32_ABCD. - unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr; const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass; unsigned CopyReg = createResultReg(CopyRC); - BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + CopyReg).addReg(InputReg); // Then issue an extract_subreg. unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8, - CopyReg, X86::SUBREG_8BIT); + CopyReg, /*Kill=*/true, + X86::sub_8bit); if (!ResultReg) return false; @@ -1149,14 +1273,18 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) { switch (CI->getIntrinsicID()) { default: break; case Intrinsic::sadd_with_overflow: - case Intrinsic::uadd_with_overflow: + case Intrinsic::uadd_with_overflow: { // Cheat a little. We know that the registers for "add" and "seto" are // allocated sequentially. However, we only keep track of the register // for "add" in the value map. Use extractvalue's index to get the // correct register for "seto". - UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin()); + unsigned OpReg = getRegForValue(Agg); + if (OpReg == 0) + return false; + UpdateValueMap(I, OpReg + *EI->idx_begin()); return true; } + } } return false; @@ -1170,8 +1298,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { // Emit code inline code to store the stack guard onto the stack. 
EVT PtrTy = TLI.getPointerTy(); - const Value *Op1 = I.getOperand(1); // The guard's value. - const AllocaInst *Slot = cast(I.getOperand(2)); + const Value *Op1 = I.getArgOperand(0); // The guard's value. + const AllocaInst *Slot = cast(I.getArgOperand(1)); // Grab the frame index. X86AddressMode AM; @@ -1182,12 +1310,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return true; } case Intrinsic::objectsize: { - ConstantInt *CI = dyn_cast(I.getOperand(2)); + ConstantInt *CI = dyn_cast(I.getArgOperand(1)); const Type *Ty = I.getCalledFunction()->getReturnType(); assert(CI && "Non-constant type in Intrinsic::objectsize?"); - EVT VT; + MVT VT; if (!isTypeLegal(Ty, VT)) return false; @@ -1200,8 +1328,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return false; unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(OpC), ResultReg). - addImm(CI->getZExtValue() == 0 ? -1ULL : 0); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg). + addImm(CI->isZero() ? -1ULL : 0); UpdateValueMap(&I, ResultReg); return true; } @@ -1214,12 +1342,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); // FIXME may need to add RegState::Debug to any registers produced, // although ESP/EBP should be the only ones at the moment. - addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0). - addMetadata(DI->getVariable()); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM). + addImm(0).addMetadata(DI->getVariable()); return true; } case Intrinsic::trap: { - BuildMI(MBB, DL, TII.get(X86::TRAP)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP)); return true; } case Intrinsic::sadd_with_overflow: @@ -1233,12 +1361,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { const Type *RetTy = cast(Callee->getReturnType())->getTypeAtIndex(unsigned(0)); - EVT VT; + MVT VT; if (!isTypeLegal(RetTy, VT)) return false; - const Value *Op1 = I.getOperand(1); - const Value *Op2 = I.getOperand(2); + const Value *Op1 = I.getArgOperand(0); + const Value *Op2 = I.getArgOperand(1); unsigned Reg1 = getRegForValue(Op1); unsigned Reg2 = getRegForValue(Op2); @@ -1255,7 +1383,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return false; unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg) + .addReg(Reg1).addReg(Reg2); unsigned DestReg1 = UpdateValueMap(&I, ResultReg); // If the add with overflow is an intra-block value then we just want to @@ -1273,7 +1402,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { unsigned Opc = X86::SETBr; if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow) Opc = X86::SETOr; - BuildMI(MBB, DL, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg); return true; } } @@ -1281,7 +1410,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { bool X86FastISel::X86SelectCall(const Instruction *I) { const CallInst *CI = cast(I); - const Value *Callee = I->getOperand(0); + const Value *Callee = CI->getCalledValue(); // Can't handle inline asm yet. if (isa(Callee)) @@ -1310,9 +1439,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { if (FTy->isVarArg()) return false; + // Fast-isel doesn't know about callee-pop yet. 
+ if (Subtarget->IsCalleePop(FTy->isVarArg(), CC)) + return false; + // Handle *simple* calls for now. const Type *RetTy = CS.getType(); - EVT RetVT; + MVT RetVT; if (RetTy->isVoidTy()) RetVT = MVT::isVoid; else if (!isTypeLegal(RetTy, RetVT, true)) @@ -1342,7 +1475,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // Deal with call operands first. SmallVector ArgVals; SmallVector Args; - SmallVector ArgVTs; + SmallVector ArgVTs; SmallVector ArgFlags; Args.reserve(CS.arg_size()); ArgVals.reserve(CS.arg_size()); @@ -1368,7 +1501,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { return false; const Type *ArgTy = (*i)->getType(); - EVT ArgVT; + MVT ArgVT; if (!isTypeLegal(ArgTy, ArgVT)) return false; unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); @@ -1383,14 +1516,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // Analyze operands of the call, assigning locations to each operand. SmallVector ArgLocs; CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext()); - CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC)); + + // Allocate shadow area for Win64 + if (Subtarget->isTargetWin64()) { + CCInfo.AllocateStack(32, 8); + } + + CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86); // Get a count of how many bytes are to be pushed on the stack. unsigned NumBytes = CCInfo.getNextStackOffset(); // Issue CALLSEQ_START unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode(); - BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown)) + .addImm(NumBytes); // Process argument: walk the register/memloc assignments, inserting // copies / loads. @@ -1421,6 +1561,9 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { break; } case CCValAssign::AExt: { + // We don't handle MMX parameters yet. + if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() == 128) + return false; bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), Arg, ArgVT, Arg); if (!Emitted) @@ -1435,8 +1578,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { break; } case CCValAssign::BCvt: { - unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT().getSimpleVT(), - ISD::BIT_CONVERT, Arg); + unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(), + ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false); assert(BC != 0 && "Failed to emit a bitcast!"); Arg = BC; ArgVT = VA.getLocVT(); @@ -1445,11 +1588,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { } if (VA.isRegLoc()) { - TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(), - Arg, RC, RC, DL); - assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; - Emitted = true; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + VA.getLocReg()).addReg(Arg); RegArgs.push_back(VA.getLocReg()); } else { unsigned LocMemOffset = VA.getLocMemOffset(); @@ -1471,26 +1611,35 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // ELF / PIC requires GOT in the EBX register before function calls via PLT // GOT pointer. 
if (Subtarget->isPICStyleGOT()) { - TargetRegisterClass *RC = X86::GR32RegisterClass; - unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC, - DL); - assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; - Emitted = true; + unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + X86::EBX).addReg(Base); } // Issue the call. MachineInstrBuilder MIB; if (CalleeOp) { // Register-indirect call. - unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r; - MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp); + unsigned CallOpc; + if (Subtarget->isTargetWin64()) + CallOpc = X86::WINCALL64r; + else if (Subtarget->is64Bit()) + CallOpc = X86::CALL64r; + else + CallOpc = X86::CALL32r; + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) + .addReg(CalleeOp); } else { // Direct call. assert(GV && "Not a direct call"); - unsigned CallOpc = - Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32; + unsigned CallOpc; + if (Subtarget->isTargetWin64()) + CallOpc = X86::WINCALL64pcrel32; + else if (Subtarget->is64Bit()) + CallOpc = X86::CALL64pcrel32; + else + CallOpc = X86::CALLpcrel32; // See if we need any target-specific flags on the GV operand. unsigned char OpFlags = 0; @@ -1513,7 +1662,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { } - MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) + .addGlobalAddress(GV, 0, OpFlags); } // Add an implicit use GOT pointer in EBX. @@ -1526,10 +1676,12 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // Issue CALLSEQ_END unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode(); - BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp)) + .addImm(NumBytes).addImm(0); // Now handle call return value (if any). - if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) { + SmallVector UsedRegs; + if (RetVT != MVT::isVoid) { SmallVector RVLocs; CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext()); CCInfo.AnalyzeCallResult(RetVT, RetCC_X86); @@ -1538,7 +1690,6 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { assert(RVLocs.size() == 1 && "Can't handle multi-value calls!"); EVT CopyVT = RVLocs[0].getValVT(); TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); - TargetRegisterClass *SrcRC = DstRC; // If this is a call to a function that returns an fp value on the x87 fp // stack, but where we prefer to use the value in xmm registers, copy it @@ -1547,15 +1698,14 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { RVLocs[0].getLocReg() == X86::ST1) && isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) { CopyVT = MVT::f80; - SrcRC = X86::RSTRegisterClass; DstRC = X86::RFP80RegisterClass; } unsigned ResultReg = createResultReg(DstRC); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - RVLocs[0].getLocReg(), DstRC, SrcRC, DL); - assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; - Emitted = true; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + ResultReg).addReg(RVLocs[0].getLocReg()); + UsedRegs.push_back(RVLocs[0].getLocReg()); + if (CopyVT != RVLocs[0].getValVT()) { // Round the F80 the right size, which also moves to the appropriate xmm // register. 
This is accomplished by storing the F80 value in memory and @@ -1564,18 +1714,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; unsigned MemSize = ResVT.getSizeInBits()/8; int FI = MFI.CreateStackObject(MemSize, MemSize, false); - addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg); + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc)), FI) + .addReg(ResultReg); DstRC = ResVT == MVT::f32 ? X86::FR32RegisterClass : X86::FR64RegisterClass; Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm; ResultReg = createResultReg(DstRC); - addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI); + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), FI); } if (AndToI1) { // Mask out all but lowest bit for some call which produces an i1. unsigned AndResult = createResultReg(X86::GR8RegisterClass); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1); ResultReg = AndResult; } @@ -1583,6 +1736,9 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { UpdateValueMap(I, ResultReg); } + // Set all unused physreg defs as dead. + static_cast(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); + return true; } @@ -1595,6 +1751,8 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { return X86SelectLoad(I); case Instruction::Store: return X86SelectStore(I); + case Instruction::Ret: + return X86SelectRet(I); case Instruction::ICmp: case Instruction::FCmp: return X86SelectCmp(I); @@ -1637,14 +1795,14 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { } unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { - EVT VT; + MVT VT; if (!isTypeLegal(C->getType(), VT)) return false; // Get opcode and regclass of the output for the given load instruction. unsigned Opc = 0; const TargetRegisterClass *RC = NULL; - switch (VT.getSimpleVT().SimpleTy) { + switch (VT.SimpleTy) { default: return false; case MVT::i8: Opc = X86::MOV8rm; @@ -1695,7 +1853,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { else Opc = X86::LEA64r; unsigned ResultReg = createResultReg(RC); - addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), AM); return ResultReg; } return 0; @@ -1713,10 +1872,10 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { unsigned char OpFlag = 0; if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic OpFlag = X86II::MO_PIC_BASE_OFFSET; - PICBase = getInstrInfo()->getGlobalBaseReg(&MF); + PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); } else if (Subtarget->isPICStyleGOT()) { OpFlag = X86II::MO_GOTOFF; - PICBase = getInstrInfo()->getGlobalBaseReg(&MF); + PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); } else if (Subtarget->isPICStyleRIPRel() && TM.getCodeModel() == CodeModel::Small) { PICBase = X86::RIP; @@ -1725,7 +1884,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { // Create the load from the constant pool. 
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align); unsigned ResultReg = createResultReg(RC); - addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), + addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), MCPOffset, PICBase, OpFlag); return ResultReg; @@ -1739,7 +1899,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) { // various places, but TargetMaterializeAlloca also needs a check // in order to avoid recursion between getRegForValue, // X86SelectAddrss, and TargetMaterializeAlloca. - if (!StaticAllocaMap.count(C)) + if (!FuncInfo.StaticAllocaMap.count(C)) return 0; X86AddressMode AM; @@ -1748,24 +1908,41 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) { unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy()); unsigned ResultReg = createResultReg(RC); - addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), AM); return ResultReg; } +/// TryToFoldLoad - The specified machine instr operand is a vreg, and that +/// vreg is being provided by the specified load instruction. If possible, +/// try to fold the load as an operand to the instruction, returning true if +/// possible. +bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) { + X86AddressMode AM; + if (!X86SelectAddress(LI->getOperand(0), AM)) + return false; + + X86InstrInfo &XII = (X86InstrInfo&)TII; + + unsigned Size = TD.getTypeAllocSize(LI->getType()); + unsigned Alignment = LI->getAlignment(); + + SmallVector AddrOps; + AM.getFullAddress(AddrOps); + + MachineInstr *Result = + XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment); + if (Result == 0) return false; + + MI->getParent()->insert(MI, Result); + MI->eraseFromParent(); + return true; +} + + namespace llvm { - llvm::FastISel *X86::createFastISel(MachineFunction &mf, - DenseMap &vm, - DenseMap &bm, - DenseMap &am, - std::vector > &pn -#ifndef NDEBUG - , SmallSet &cil -#endif - ) { - return new X86FastISel(mf, vm, bm, am, pn -#ifndef NDEBUG - , cil -#endif - ); + llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) { + return new X86FastISel(funcInfo); } }
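
Illustrative sketch, not part of the diff above: with this change the target fast-isel is built from a single FunctionLoweringInfo object rather than the old bundle of value/block/alloca maps, so a SelectionDAG-side caller only forwards its FuncInfo. The wrapper name below is hypothetical, and X86.h is assumed to carry the matching createFastISel declaration.

// Hypothetical caller of the factory whose new signature appears at the end
// of this diff. FunctionLoweringInfo already owns the tables (value map,
// basic-block map, static-alloca map) that the removed constructor
// parameters used to pass individually.
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "X86.h" // assumed to declare llvm::X86::createFastISel(FunctionLoweringInfo &)

static llvm::FastISel *createX86FastISelFor(llvm::FunctionLoweringInfo &FuncInfo) {
  // Previously: X86::createFastISel(mf, vm, bm, am, pn /*, cil in !NDEBUG builds*/)
  return llvm::X86::createFastISel(FuncInfo);
}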