diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index c98156e8869..6f8fb1a3521 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -14,46 +14,41 @@
 //===----------------------------------------------------------------------===//
 
 #include "ARM.h"
-#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
 #include "ARMCallingConv.h"
-#include "ARMRegisterInfo.h"
-#include "ARMTargetMachine.h"
-#include "ARMSubtarget.h"
 #include "ARMConstantPoolValue.h"
+#include "ARMISelLowering.h"
+#include "ARMMachineFunctionInfo.h"
+#include "ARMSubtarget.h"
 #include "MCTargetDesc/ARMAddressingModes.h"
-#include "llvm/CallingConv.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Module.h"
-#include "llvm/Operator.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 
 using namespace llvm;
 
-static cl::opt<bool>
-DisableARMFastISel("disable-arm-fast-isel",
-                   cl::desc("Turn off experimental ARM fast-isel support"),
-                   cl::init(false), cl::Hidden);
-
 extern cl::opt<bool> EnableARMLongCalls;
 
 namespace {
@@ -79,76 +74,71 @@ namespace {
     }
   } Address;
 
-class ARMFastISel : public FastISel {
+class ARMFastISel final : public FastISel {
 
   /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
   /// make the right decision when generating code for different targets.
   const ARMSubtarget *Subtarget;
+  Module &M;
   const TargetMachine &TM;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
   ARMFunctionInfo *AFI;
 
   // Convenience variables to avoid some queries.
- bool isThumb; + bool isThumb2; LLVMContext *Context; public: - explicit ARMFastISel(FunctionLoweringInfo &funcInfo) - : FastISel(funcInfo), + explicit ARMFastISel(FunctionLoweringInfo &funcInfo, + const TargetLibraryInfo *libInfo) + : FastISel(funcInfo, libInfo), + M(const_cast(*funcInfo.Fn->getParent())), TM(funcInfo.MF->getTarget()), TII(*TM.getInstrInfo()), TLI(*TM.getTargetLowering()) { Subtarget = &TM.getSubtarget(); AFI = funcInfo.MF->getInfo(); - isThumb = AFI->isThumbFunction(); + isThumb2 = AFI->isThumbFunction(); Context = &funcInfo.Fn->getContext(); } // Code from FastISel.cpp. - virtual unsigned FastEmitInst_(unsigned MachineInstOpcode, - const TargetRegisterClass *RC); - virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill); - virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill); - virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill, - unsigned Op2, bool Op2IsKill); - virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - uint64_t Imm); - virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - const ConstantFP *FPImm); - virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill, - uint64_t Imm); - virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm); - virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm1, uint64_t Imm2); - - virtual unsigned FastEmitInst_extractsubreg(MVT RetVT, - unsigned Op0, bool Op0IsKill, - uint32_t Idx); + private: + unsigned FastEmitInst_r(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill); + unsigned FastEmitInst_rr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill); + unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill, + unsigned Op2, bool Op2IsKill); + unsigned FastEmitInst_ri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + uint64_t Imm); + unsigned FastEmitInst_rri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill, + uint64_t Imm); + unsigned FastEmitInst_i(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + uint64_t Imm); // Backend specific FastISel code. - virtual bool TargetSelectInstruction(const Instruction *I); - virtual unsigned TargetMaterializeConstant(const Constant *C); - virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); - + private: + bool TargetSelectInstruction(const Instruction *I) override; + unsigned TargetMaterializeConstant(const Constant *C) override; + unsigned TargetMaterializeAlloca(const AllocaInst *AI) override; + bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) override; + bool FastLowerArguments() override; + private: #include "ARMGenFastISel.inc" // Instruction selection routines. 
@@ -156,19 +146,23 @@ class ARMFastISel : public FastISel { bool SelectLoad(const Instruction *I); bool SelectStore(const Instruction *I); bool SelectBranch(const Instruction *I); + bool SelectIndirectBr(const Instruction *I); bool SelectCmp(const Instruction *I); bool SelectFPExt(const Instruction *I); bool SelectFPTrunc(const Instruction *I); - bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode); - bool SelectSIToFP(const Instruction *I); - bool SelectFPToSI(const Instruction *I); - bool SelectSDiv(const Instruction *I); - bool SelectSRem(const Instruction *I); - bool SelectCall(const Instruction *I); + bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode); + bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode); + bool SelectIToFP(const Instruction *I, bool isSigned); + bool SelectFPToI(const Instruction *I, bool isSigned); + bool SelectDiv(const Instruction *I, bool isSigned); + bool SelectRem(const Instruction *I, bool isSigned); + bool SelectCall(const Instruction *I, const char *IntrMemName); + bool SelectIntrinsicCall(const IntrinsicInst &I); bool SelectSelect(const Instruction *I); bool SelectRet(const Instruction *I); bool SelectTrunc(const Instruction *I); bool SelectIntExt(const Instruction *I); + bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy); // Utility routines. private: @@ -176,33 +170,44 @@ class ARMFastISel : public FastISel { bool isLoadTypeLegal(Type *Ty, MVT &VT); bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, bool isZExt); - bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr); - bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr); + bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, + unsigned Alignment = 0, bool isZExt = true, + bool allocReg = true); + bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, + unsigned Alignment = 0); bool ARMComputeAddress(const Value *Obj, Address &Addr); - void ARMSimplifyAddress(Address &Addr, EVT VT); - unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt); - unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT); - unsigned ARMMaterializeInt(const Constant *C, EVT VT); - unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT); - unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg); - unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg); - unsigned ARMSelectCallOp(const GlobalValue *GV); + void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3); + bool ARMIsMemCpySmall(uint64_t Len); + bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len, + unsigned Alignment); + unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt); + unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT); + unsigned ARMMaterializeInt(const Constant *C, MVT VT); + unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT); + unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg); + unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg); + unsigned ARMSelectCallOp(bool UseReg); + unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT); + + const TargetLowering *getTargetLowering() { return TM.getTargetLowering(); } // Call handling routines. 
private: - bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, - unsigned &ResultReg); - CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return); + CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, + bool Return, + bool isVarArg); bool ProcessCallArgs(SmallVectorImpl &Args, SmallVectorImpl &ArgRegs, SmallVectorImpl &ArgVTs, SmallVectorImpl &ArgFlags, SmallVectorImpl &RegArgs, CallingConv::ID CC, - unsigned &NumBytes); + unsigned &NumBytes, + bool isVarArg); + unsigned getLibcallReg(const Twine &Name); bool FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs, const Instruction *I, CallingConv::ID CC, - unsigned &NumBytes); + unsigned &NumBytes, bool isVarArg); bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call); // OptionalDef handling routines. @@ -210,9 +215,9 @@ class ARMFastISel : public FastISel { bool isARMNEONPred(const MachineInstr *MI); bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); - void AddLoadStoreOperands(EVT VT, Address &Addr, + void AddLoadStoreOperands(MVT VT, Address &Addr, const MachineInstrBuilder &MIB, - unsigned Flags); + unsigned Flags, bool useAM3); }; } // end anonymous namespace @@ -223,8 +228,7 @@ class ARMFastISel : public FastISel { // we don't care about implicit defs here, just places we'll need to add a // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { - const MCInstrDesc &MCID = MI->getDesc(); - if (!MCID.hasOptionalDef()) + if (!MI->hasOptionalDef()) return false; // Look to see if our OptionalDef is defining CPSR or CCR. @@ -240,10 +244,10 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { const MCInstrDesc &MCID = MI->getDesc(); - // If we're a thumb2 or not NEON function we were handled via isPredicable. + // If we're a thumb2 or not NEON function we'll be handled via isPredicable. if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || AFI->isThumb2Function()) - return false; + return MI->isPredicable(); for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) if (MCID.OpInfo[i].isPredicate()) @@ -264,7 +268,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { // Do we use a predicate? or... // Are we NEON in ARM mode and have a predicate operand? If so, I know // we're not predicable but add it anyways. - if (TII.isPredicable(MI) || isARMNEONPred(MI)) + if (isARMNEONPred(MI)) AddDefaultPred(MIB); // Do we optionally set a predicate? 
Preds is size > 0 iff the predicate @@ -279,28 +283,22 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { return MIB; } -unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, - const TargetRegisterClass* RC) { - unsigned ResultReg = createResultReg(RC); - const MCInstrDesc &II = TII.get(MachineInstOpcode); - - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); - return ResultReg; -} - unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill) { unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + // Make sure the input operand is sufficiently constrained to be legal + // for this instruction. + Op0 = constrainOperandRegClass(II, Op0, 1); + if (II.getNumDefs() >= 1) { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, + ResultReg).addReg(Op0, Op0IsKill * RegState::Kill)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addReg(Op0, Op0IsKill * RegState::Kill)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } @@ -314,15 +312,21 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addReg(Op1, Op1IsKill * RegState::Kill)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + // Make sure the input operands are sufficiently constrained to be legal + // for this instruction. + Op0 = constrainOperandRegClass(II, Op0, 1); + Op1 = constrainOperandRegClass(II, Op1, 2); + + if (II.getNumDefs() >= 1) { + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op1, Op1IsKill * RegState::Kill)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } @@ -337,17 +341,24 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addReg(Op1, Op1IsKill * RegState::Kill) - .addReg(Op2, Op2IsKill * RegState::Kill)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + // Make sure the input operands are sufficiently constrained to be legal + // for this instruction. 
+ Op0 = constrainOperandRegClass(II, Op0, 1); + Op1 = constrainOperandRegClass(II, Op1, 2); + Op2 = constrainOperandRegClass(II, Op1, 3); + + if (II.getNumDefs() >= 1) { + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op1, Op1IsKill * RegState::Kill) + .addReg(Op2, Op2IsKill * RegState::Kill)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill) .addReg(Op2, Op2IsKill * RegState::Kill)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } @@ -361,37 +372,19 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addImm(Imm)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + // Make sure the input operand is sufficiently constrained to be legal + // for this instruction. + Op0 = constrainOperandRegClass(II, Op0, 1); + if (II.getNumDefs() >= 1) { + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addImm(Imm)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addImm(Imm)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(TargetOpcode::COPY), ResultReg) - .addReg(II.ImplicitDefs[0])); - } - return ResultReg; -} - -unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - const ConstantFP *FPImm) { - unsigned ResultReg = createResultReg(RC); - const MCInstrDesc &II = TII.get(MachineInstOpcode); - - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addFPImm(FPImm)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addFPImm(FPImm)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } @@ -406,17 +399,22 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addReg(Op1, Op1IsKill * RegState::Kill) - .addImm(Imm)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + // Make sure the input operands are sufficiently constrained to be legal + // for this instruction. 
+ Op0 = constrainOperandRegClass(II, Op0, 1); + Op1 = constrainOperandRegClass(II, Op1, 2); + if (II.getNumDefs() >= 1) { + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op1, Op1IsKill * RegState::Kill) + .addImm(Imm)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill) .addImm(Imm)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } @@ -429,69 +427,37 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addImm(Imm)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + if (II.getNumDefs() >= 1) { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, + ResultReg).addImm(Imm)); + } else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) .addImm(Imm)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); } return ResultReg; } -unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm1, uint64_t Imm2) { - unsigned ResultReg = createResultReg(RC); - const MCInstrDesc &II = TII.get(MachineInstOpcode); - - if (II.getNumDefs() >= 1) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) - .addImm(Imm1).addImm(Imm2)); - else { - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) - .addImm(Imm1).addImm(Imm2)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(TargetOpcode::COPY), - ResultReg) - .addReg(II.ImplicitDefs[0])); - } - return ResultReg; -} - -unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, - unsigned Op0, bool Op0IsKill, - uint32_t Idx) { - unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); - assert(TargetRegisterInfo::isVirtualRegister(Op0) && - "Cannot yet extract from physregs"); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, - DL, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill), Idx)); - return ResultReg; -} - // TODO: Don't worry about 64-bit now, but when this is fixed remove the // checks from the various callers. 
-unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { +unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) { if (VT == MVT::f64) return 0; unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(ARM::VMOVRS), MoveReg) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::VMOVSR), MoveReg) .addReg(SrcReg)); return MoveReg; } -unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { +unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) { if (VT == MVT::i64) return 0; unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(ARM::VMOVSR), MoveReg) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::VMOVRS), MoveReg) .addReg(SrcReg)); return MoveReg; } @@ -499,7 +465,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { // For double width floating point we need to materialize two constants // (the high and the low) into integer registers then use a move to get // the combined constant into an FP reg. -unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { +unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) { const APFloat Val = CFP->getValueAPF(); bool is64bit = VT == MVT::f64; @@ -516,9 +482,8 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { Opc = ARM::FCONSTS; } unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), - DestReg) - .addImm(Imm)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), DestReg).addImm(Imm)); return DestReg; } @@ -526,24 +491,24 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { if (!Subtarget->hasVFP2()) return false; // MachineConstantPool wants an explicit alignment. - unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); + unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); if (Align == 0) { // TODO: Figure out if this is correct. - Align = TD.getTypeAllocSize(CFP->getType()); + Align = DL.getTypeAllocSize(CFP->getType()); } unsigned Idx = MCP.getConstantPoolIndex(cast(CFP), Align); unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; // The extra reg is for addrmode5. - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), - DestReg) - .addConstantPoolIndex(Idx) - .addReg(0)); + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + .addConstantPoolIndex(Idx) + .addReg(0)); return DestReg; } -unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { +unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) { if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) return false; @@ -552,97 +517,161 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { // do so now. const ConstantInt *CI = cast(C); if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { - EVT SrcVT = MVT::i32; - unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16; - unsigned ImmReg = createResultReg(TLI.getRegClassFor(SrcVT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; + const TargetRegisterClass *RC = isThumb2 ? 
&ARM::rGPRRegClass : + &ARM::GPRRegClass; + unsigned ImmReg = createResultReg(RC); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg) .addImm(CI->getZExtValue())); return ImmReg; } - // For now 32-bit only. + // Use MVN to emit negative constants. + if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { + unsigned Imm = (unsigned)~(CI->getSExtValue()); + bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : + (ARM_AM::getSOImmVal(Imm) != -1); + if (UseImm) { + unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; + unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ImmReg) + .addImm(Imm)); + return ImmReg; + } + } + + // Load from constant pool. For now 32-bit only. if (VT != MVT::i32) return false; unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); // MachineConstantPool wants an explicit alignment. - unsigned Align = TD.getPrefTypeAlignment(C->getType()); + unsigned Align = DL.getPrefTypeAlignment(C->getType()); if (Align == 0) { // TODO: Figure out if this is correct. - Align = TD.getTypeAllocSize(C->getType()); + Align = DL.getTypeAllocSize(C->getType()); } unsigned Idx = MCP.getConstantPoolIndex(C, Align); - if (isThumb) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + if (isThumb2) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::t2LDRpci), DestReg) .addConstantPoolIndex(Idx)); - else + else { // The extra immediate is for addrmode2. - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::LDRcp), DestReg) .addConstantPoolIndex(Idx) .addImm(0)); + } return DestReg; } -unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { +unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { // For now 32-bit only. if (VT != MVT::i32) return 0; Reloc::Model RelocM = TM.getRelocationModel(); + bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; + unsigned DestReg = createResultReg(RC); - // TODO: Need more magic for ARM PIC. - if (!isThumb && (RelocM == Reloc::PIC_)) return 0; - - // MachineConstantPool wants an explicit alignment. - unsigned Align = TD.getPrefTypeAlignment(GV->getType()); - if (Align == 0) { - // TODO: Figure out if this is correct. - Align = TD.getTypeAllocSize(GV->getType()); - } + // FastISel TLS support on non-MachO is broken, punt to SelectionDAG. + const GlobalVariable *GVar = dyn_cast(GV); + bool IsThreadLocal = GVar && GVar->isThreadLocal(); + if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0; - // Grab index. - unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8); - unsigned Id = AFI->createPICLabelUId(); - ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, - ARMCP::CPValue, - PCAdj); - unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); + // Use movw+movt when possible, it avoids constant pool entries. + // Non-darwin targets only support static movt relocations in FastISel. 
+ if (Subtarget->useMovt() && + (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) { + unsigned Opc; + unsigned char TF = 0; + if (Subtarget->isTargetMachO()) + TF = ARMII::MO_NONLAZY; - // Load value. - MachineInstrBuilder MIB; - unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); - if (isThumb) { - unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) - .addConstantPoolIndex(Idx); - if (RelocM == Reloc::PIC_) - MIB.addImm(Id); + switch (RelocM) { + case Reloc::PIC_: + Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; + break; + default: + Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; + break; + } + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF)); } else { - // The extra immediate is for addrmode2. - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), - DestReg) - .addConstantPoolIndex(Idx) - .addImm(0); + // MachineConstantPool wants an explicit alignment. + unsigned Align = DL.getPrefTypeAlignment(GV->getType()); + if (Align == 0) { + // TODO: Figure out if this is correct. + Align = DL.getTypeAllocSize(GV->getType()); + } + + if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) + return ARMLowerPICELF(GV, Align, VT); + + // Grab index. + unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : + (Subtarget->isThumb() ? 4 : 8); + unsigned Id = AFI->createPICLabelUId(); + ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, + ARMCP::CPValue, + PCAdj); + unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); + + // Load value. + MachineInstrBuilder MIB; + if (isThumb2) { + unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + DestReg).addConstantPoolIndex(Idx); + if (RelocM == Reloc::PIC_) + MIB.addImm(Id); + AddOptionalDefs(MIB); + } else { + // The extra immediate is for addrmode2. + DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::LDRcp), DestReg) + .addConstantPoolIndex(Idx) + .addImm(0); + AddOptionalDefs(MIB); + + if (RelocM == Reloc::PIC_) { + unsigned Opc = IsIndirect ? 
ARM::PICLDR : ARM::PICADD; + unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); + + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DbgLoc, TII.get(Opc), NewDestReg) + .addReg(DestReg) + .addImm(Id); + AddOptionalDefs(MIB); + return NewDestReg; + } + } } - AddOptionalDefs(MIB); - if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) { + if (IsIndirect) { + MachineInstrBuilder MIB; unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); - if (isThumb) - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + if (isThumb2) + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::t2LDRi12), NewDestReg) .addReg(DestReg) .addImm(0); else - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), - NewDestReg) - .addReg(DestReg) - .addImm(0); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::LDRi12), NewDestReg) + .addReg(DestReg) + .addImm(0); DestReg = NewDestReg; AddOptionalDefs(MIB); } @@ -651,10 +680,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { } unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { - EVT VT = TLI.getValueType(C->getType(), true); + EVT CEVT = TLI.getValueType(C->getType(), true); // Only handle simple types. - if (!VT.isSimple()) return 0; + if (!CEVT.isSimple()) return 0; + MVT VT = CEVT.getSimpleVT(); if (const ConstantFP *CFP = dyn_cast(C)) return ARMMaterializeFP(CFP, VT); @@ -666,12 +696,14 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { return 0; } +// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); + unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { // Don't handle dynamic allocas. if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; MVT VT; - if (!isLoadTypeLegal(AI->getType(), VT)) return false; + if (!isLoadTypeLegal(AI->getType(), VT)) return 0; DenseMap::iterator SI = FuncInfo.StaticAllocaMap.find(AI); @@ -679,10 +711,12 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { // This will get lowered later into the correct offsets and registers // via rewriteXFrameIndex. if (SI != FuncInfo.StaticAllocaMap.end()) { - TargetRegisterClass* RC = TLI.getRegClassFor(VT); + unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; + const TargetRegisterClass* RC = TLI.getRegClassFor(VT); unsigned ResultReg = createResultReg(RC); - unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL, + ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0); + + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addFrameIndex(SI->second) .addImm(0)); @@ -709,7 +743,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { // If this is a type than can be sign or zero-extended to a basic operation // go ahead and accept it now. - if (VT == MVT::i8 || VT == MVT::i16) + if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) return true; return false; @@ -718,7 +752,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { // Computes the address to get to an object. bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { // Some boilerplate from the X86 FastISel. 
- const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from @@ -742,22 +776,19 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { switch (Opcode) { default: break; - case Instruction::BitCast: { + case Instruction::BitCast: // Look through bitcasts. return ARMComputeAddress(U->getOperand(0), Addr); - } - case Instruction::IntToPtr: { + case Instruction::IntToPtr: // Look past no-op inttoptrs. if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) return ARMComputeAddress(U->getOperand(0), Addr); break; - } - case Instruction::PtrToInt: { + case Instruction::PtrToInt: // Look past no-op ptrtoints. if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) return ARMComputeAddress(U->getOperand(0), Addr); break; - } case Instruction::GetElementPtr: { Address SavedAddr = Addr; int TmpOffset = Addr.Offset; @@ -769,24 +800,19 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { i != e; ++i, ++GTI) { const Value *Op = *i; if (StructType *STy = dyn_cast(*GTI)) { - const StructLayout *SL = TD.getStructLayout(STy); + const StructLayout *SL = DL.getStructLayout(STy); unsigned Idx = cast(Op)->getZExtValue(); TmpOffset += SL->getElementOffset(Idx); } else { - uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); + uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); for (;;) { if (const ConstantInt *CI = dyn_cast(Op)) { // Constant-offset addressing. TmpOffset += CI->getSExtValue() * S; break; } - if (isa(Op) && - (!isa(Op) || - FuncInfo.MBBMap[cast(Op)->getParent()] - == FuncInfo.MBB) && - isa(cast(Op)->getOperand(1))) { - // An add (in the same block) with a constant operand. Fold the - // constant. + if (canFoldAddIntoGEP(U, Op)) { + // A compatible add with a constant operand. Fold the constant. ConstantInt *CI = cast(cast(Op)->getOperand(1)); TmpOffset += CI->getSExtValue() * S; @@ -823,35 +849,30 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { } } - // Materialize the global variable's address into a reg which can - // then be used later to load the variable. - if (const GlobalValue *GV = dyn_cast(Obj)) { - unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType())); - if (Tmp == 0) return false; - - Addr.Base.Reg = Tmp; - return true; - } - // Try to get this in a register if nothing else has worked. if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); return Addr.Base.Reg != 0; } -void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) { - - assert(VT.isSimple() && "Non-simple types are invalid here!"); - +void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) { bool needsLowering = false; - switch (VT.getSimpleVT().SimpleTy) { - default: - assert(false && "Unhandled load/store type!"); + switch (VT.SimpleTy) { + default: llvm_unreachable("Unhandled load/store type!"); case MVT::i1: case MVT::i8: case MVT::i16: case MVT::i32: - // Integer loads/stores handle 12-bit offsets. - needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); + if (!useAM3) { + // Integer loads/stores handle 12-bit offsets. + needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); + // Handle negative offsets. + if (needsLowering && isThumb2) + needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && + Addr.Offset > -256); + } else { + // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 
+ needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); + } break; case MVT::f32: case MVT::f64: @@ -864,11 +885,12 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) { // put the alloca address into a register, set the base type back to // register and continue. This should almost never happen. if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { - TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass : - ARM::GPRRegisterClass; + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned ResultReg = createResultReg(RC); - unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL, + unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addFrameIndex(Addr.Base.FI) .addImm(0)); @@ -885,13 +907,12 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) { } } -void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, +void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, const MachineInstrBuilder &MIB, - unsigned Flags) { + unsigned Flags, bool useAM3) { // addrmode5 output depends on the selection dag addressing dividing the // offset by 4 that it then later multiplies. Do this here as well. - if (VT.getSimpleVT().SimpleTy == MVT::f32 || - VT.getSimpleVT().SimpleTy == MVT::f64) + if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64) Addr.Offset /= 4; // Frame base works a bit differently. Handle it separately. @@ -907,60 +928,132 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, // Now add the rest of the operands. MIB.addFrameIndex(FI); - // ARM halfword load/stores need an additional operand. - if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0); - - MIB.addImm(Addr.Offset); + // ARM halfword load/stores and signed byte loads need an additional + // operand. + if (useAM3) { + signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; + MIB.addReg(0); + MIB.addImm(Imm); + } else { + MIB.addImm(Addr.Offset); + } MIB.addMemOperand(MMO); } else { // Now add the rest of the operands. MIB.addReg(Addr.Base.Reg); - // ARM halfword load/stores need an additional operand. - if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0); - - MIB.addImm(Addr.Offset); + // ARM halfword load/stores and signed byte loads need an additional + // operand. + if (useAM3) { + signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; + MIB.addReg(0); + MIB.addImm(Imm); + } else { + MIB.addImm(Addr.Offset); + } } AddOptionalDefs(MIB); } -bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) { - - assert(VT.isSimple() && "Non-simple types are invalid here!"); +bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, + unsigned Alignment, bool isZExt, bool allocReg) { unsigned Opc; - TargetRegisterClass *RC; - switch (VT.getSimpleVT().SimpleTy) { + bool useAM3 = false; + bool needVMOV = false; + const TargetRegisterClass *RC; + switch (VT.SimpleTy) { // This is mostly going to be Neon/vector support. default: return false; - case MVT::i16: - Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH; - RC = ARM::GPRRegisterClass; - break; + case MVT::i1: case MVT::i8: - Opc = isThumb ? 
ARM::t2LDRBi12 : ARM::LDRBi12; - RC = ARM::GPRRegisterClass; + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; + else + Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; + } else { + if (isZExt) { + Opc = ARM::LDRBi12; + } else { + Opc = ARM::LDRSB; + useAM3 = true; + } + } + RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; + break; + case MVT::i16: + if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) + return false; + + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; + else + Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; + } else { + Opc = isZExt ? ARM::LDRH : ARM::LDRSH; + useAM3 = true; + } + RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; break; case MVT::i32: - Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12; - RC = ARM::GPRRegisterClass; + if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) + return false; + + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + Opc = ARM::t2LDRi8; + else + Opc = ARM::t2LDRi12; + } else { + Opc = ARM::LDRi12; + } + RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; break; case MVT::f32: - Opc = ARM::VLDRS; - RC = TLI.getRegClassFor(VT); + if (!Subtarget->hasVFP2()) return false; + // Unaligned loads need special handling. Floats require word-alignment. + if (Alignment && Alignment < 4) { + needVMOV = true; + VT = MVT::i32; + Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; + RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; + } else { + Opc = ARM::VLDRS; + RC = TLI.getRegClassFor(VT); + } break; case MVT::f64: + if (!Subtarget->hasVFP2()) return false; + // FIXME: Unaligned loads need special handling. Doublewords require + // word-alignment. + if (Alignment && Alignment < 4) + return false; + Opc = ARM::VLDRD; RC = TLI.getRegClassFor(VT); break; } // Simplify this down to something we can handle. - ARMSimplifyAddress(Addr, VT); + ARMSimplifyAddress(Addr, VT, useAM3); // Create the base instruction, then add the operands. - ResultReg = createResultReg(RC); - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + if (allocReg) + ResultReg = createResultReg(RC); + assert (ResultReg > 255 && "Expected an allocated virtual register."); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); - AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad); + AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3); + + // If we had an unaligned load of a float we've converted it to an regular + // load. Now we must move from the GRP to the FP register. 
+ if (needVMOV) { + unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::VMOVSR), MoveReg) + .addReg(ResultReg)); + ResultReg = MoveReg; + } return true; } @@ -979,51 +1072,101 @@ bool ARMFastISel::SelectLoad(const Instruction *I) { if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; unsigned ResultReg; - if (!ARMEmitLoad(VT, ResultReg, Addr)) return false; + if (!ARMEmitLoad(VT, ResultReg, Addr, cast(I)->getAlignment())) + return false; UpdateValueMap(I, ResultReg); return true; } -bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) { +bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, + unsigned Alignment) { unsigned StrOpc; - switch (VT.getSimpleVT().SimpleTy) { + bool useAM3 = false; + switch (VT.SimpleTy) { // This is mostly going to be Neon/vector support. default: return false; case MVT::i1: { - unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass : - ARM::GPRRegisterClass); - unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + unsigned Res = createResultReg(isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass); + unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; + SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Res) .addReg(SrcReg).addImm(1)); SrcReg = Res; } // Fallthrough here. case MVT::i8: - StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12; + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + StrOpc = ARM::t2STRBi8; + else + StrOpc = ARM::t2STRBi12; + } else { + StrOpc = ARM::STRBi12; + } break; case MVT::i16: - StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH; + if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) + return false; + + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + StrOpc = ARM::t2STRHi8; + else + StrOpc = ARM::t2STRHi12; + } else { + StrOpc = ARM::STRH; + useAM3 = true; + } break; case MVT::i32: - StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12; + if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) + return false; + + if (isThumb2) { + if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) + StrOpc = ARM::t2STRi8; + else + StrOpc = ARM::t2STRi12; + } else { + StrOpc = ARM::STRi12; + } break; case MVT::f32: if (!Subtarget->hasVFP2()) return false; - StrOpc = ARM::VSTRS; + // Unaligned stores need special handling. Floats require word-alignment. + if (Alignment && Alignment < 4) { + unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::VMOVRS), MoveReg) + .addReg(SrcReg)); + SrcReg = MoveReg; + VT = MVT::i32; + StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; + } else { + StrOpc = ARM::VSTRS; + } break; case MVT::f64: if (!Subtarget->hasVFP2()) return false; + // FIXME: Unaligned stores need special handling. Doublewords require + // word-alignment. + if (Alignment && Alignment < 4) + return false; + StrOpc = ARM::VSTRD; break; } // Simplify this down to something we can handle. - ARMSimplifyAddress(Addr, VT); + ARMSimplifyAddress(Addr, VT, useAM3); // Create the base instruction, then add the operands. 
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(StrOpc)) - .addReg(SrcReg, getKillRegState(true)); - AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore); + .addReg(SrcReg); + AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); return true; } @@ -1049,7 +1192,8 @@ bool ARMFastISel::SelectStore(const Instruction *I) { if (!ARMComputeAddress(I->getOperand(1), Addr)) return false; - if (!ARMEmitStore(VT, SrcReg, Addr)) return false; + if (!ARMEmitStore(VT, SrcReg, Addr, cast(I)->getAlignment())) + return false; return true; } @@ -1129,10 +1273,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) return false; - unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) + unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc)) .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); - FastEmitBranch(FBB, DL); + FastEmitBranch(FBB, DbgLoc); FuncInfo.MBB->addSuccessor(TBB); return true; } @@ -1140,9 +1284,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { MVT SourceVT; if (TI->hasOneUse() && TI->getParent() == I->getParent() && (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { - unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri; + unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; unsigned OpReg = getRegForValue(TI->getOperand(0)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc)) .addReg(OpReg).addImm(1)); @@ -1152,11 +1297,11 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { CCMode = ARMCC::EQ; } - unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) + unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc)) .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); - FastEmitBranch(FBB, DL); + FastEmitBranch(FBB, DbgLoc); FuncInfo.MBB->addSuccessor(TBB); return true; } @@ -1164,7 +1309,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { dyn_cast(BI->getCondition())) { uint64_t Imm = CI->getZExtValue(); MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; - FastEmitBranch(Target, DL); + FastEmitBranch(Target, DbgLoc); return true; } @@ -1178,9 +1323,12 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { // Regardless, the compare has been done in the predecessor block, // and it left a value for us in a virtual register. Ergo, we test // the one-bit value left in the virtual register. - unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) - .addReg(CmpReg).addImm(1)); + unsigned TstOpc = isThumb2 ? 
ARM::t2TSTri : ARM::TSTri; + CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0); + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc)) + .addReg(CmpReg) + .addImm(1)); unsigned CCMode = ARMCC::NE; if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { @@ -1188,34 +1336,81 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { CCMode = ARMCC::EQ; } - unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) + unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc)) .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); - FastEmitBranch(FBB, DL); + FastEmitBranch(FBB, DbgLoc); FuncInfo.MBB->addSuccessor(TBB); return true; } +bool ARMFastISel::SelectIndirectBr(const Instruction *I) { + unsigned AddrReg = getRegForValue(I->getOperand(0)); + if (AddrReg == 0) return false; + + unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc)).addReg(AddrReg)); + + const IndirectBrInst *IB = cast(I); + for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i) + FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]); + + return true; +} + bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, bool isZExt) { Type *Ty = Src1Value->getType(); - EVT SrcVT = TLI.getValueType(Ty, true); - if (!SrcVT.isSimple()) return false; + EVT SrcEVT = TLI.getValueType(Ty, true); + if (!SrcEVT.isSimple()) return false; + MVT SrcVT = SrcEVT.getSimpleVT(); bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); if (isFloat && !Subtarget->hasVFP2()) return false; + // Check to see if the 2nd operand is a constant that we can encode directly + // in the compare. + int Imm = 0; + bool UseImm = false; + bool isNegativeImm = false; + // FIXME: At -O0 we don't have anything that canonicalizes operand order. + // Thus, Src1Value may be a ConstantInt, but we're missing it. + if (const ConstantInt *ConstInt = dyn_cast(Src2Value)) { + if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || + SrcVT == MVT::i1) { + const APInt &CIVal = ConstInt->getValue(); + Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); + // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather + // then a cmn, because there is no way to represent 2147483648 as a + // signed 32-bit int. + if (Imm < 0 && Imm != (int)0x80000000) { + isNegativeImm = true; + Imm = -Imm; + } + UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : + (ARM_AM::getSOImmVal(Imm) != -1); + } + } else if (const ConstantFP *ConstFP = dyn_cast(Src2Value)) { + if (SrcVT == MVT::f32 || SrcVT == MVT::f64) + if (ConstFP->isZero() && !ConstFP->isNegative()) + UseImm = true; + } + unsigned CmpOpc; + bool isICmp = true; bool needsExt = false; - switch (SrcVT.getSimpleVT().SimpleTy) { + switch (SrcVT.SimpleTy) { default: return false; // TODO: Verify compares. case MVT::f32: - CmpOpc = ARM::VCMPES; + isICmp = false; + CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; break; case MVT::f64: - CmpOpc = ARM::VCMPED; + isICmp = false; + CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; break; case MVT::i1: case MVT::i8: @@ -1223,42 +1418,66 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, needsExt = true; // Intentional fall-through. case MVT::i32: - CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr; + if (isThumb2) { + if (!UseImm) + CmpOpc = ARM::t2CMPrr; + else + CmpOpc = isNegativeImm ? 
ARM::t2CMNri : ARM::t2CMPri; + } else { + if (!UseImm) + CmpOpc = ARM::CMPrr; + else + CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; + } break; } unsigned SrcReg1 = getRegForValue(Src1Value); if (SrcReg1 == 0) return false; - unsigned SrcReg2 = getRegForValue(Src2Value); - if (SrcReg2 == 0) return false; + unsigned SrcReg2 = 0; + if (!UseImm) { + SrcReg2 = getRegForValue(Src2Value); + if (SrcReg2 == 0) return false; + } // We have i1, i8, or i16, we need to either zero extend or sign extend. if (needsExt) { - unsigned ResultReg; - EVT DestVT = MVT::i32; - ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, DestVT, isZExt); - if (ResultReg == 0) return false; - SrcReg1 = ResultReg; - ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, DestVT, isZExt); - if (ResultReg == 0) return false; - SrcReg2 = ResultReg; + SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); + if (SrcReg1 == 0) return false; + if (!UseImm) { + SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); + if (SrcReg2 == 0) return false; + } } - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) - .addReg(SrcReg1).addReg(SrcReg2)); + const MCInstrDesc &II = TII.get(CmpOpc); + SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0); + if (!UseImm) { + SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(SrcReg1).addReg(SrcReg2)); + } else { + MachineInstrBuilder MIB; + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(SrcReg1); + + // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. + if (isICmp) + MIB.addImm(Imm); + AddOptionalDefs(MIB); + } // For floating point we need to move the result to a comparison register // that we can then use for branches. if (Ty->isFloatTy() || Ty->isDoubleTy()) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::FMSTAT))); return true; } bool ARMFastISel::SelectCmp(const Instruction *I) { const CmpInst *CI = cast(I); - Type *Ty = CI->getOperand(0)->getType(); // Get the compare predicate. ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); @@ -1272,17 +1491,17 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { // Now set a register based on the comparison. Explicitly set the predicates // here. - unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi; - TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass - : ARM::GPRRegisterClass; + unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned DestReg = createResultReg(RC); Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); unsigned ZeroReg = TargetMaterializeConstant(Zero); - bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); - unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) + // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg) .addReg(ZeroReg).addImm(1) - .addImm(ARMPred).addReg(CondReg); + .addImm(ARMPred).addReg(ARM::CPSR); UpdateValueMap(I, DestReg); return true; @@ -1299,8 +1518,8 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) { unsigned Op = getRegForValue(V); if (Op == 0) return false; - unsigned Result = createResultReg(ARM::DPRRegisterClass); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + unsigned Result = createResultReg(&ARM::DPRRegClass); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::VCVTDS), Result) .addReg(Op)); UpdateValueMap(I, Result); @@ -1318,15 +1537,15 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) { unsigned Op = getRegForValue(V); if (Op == 0) return false; - unsigned Result = createResultReg(ARM::SPRRegisterClass); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + unsigned Result = createResultReg(&ARM::SPRRegClass); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::VCVTSD), Result) .addReg(Op)); UpdateValueMap(I, Result); return true; } -bool ARMFastISel::SelectSIToFP(const Instruction *I) { +bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { // Make sure we have VFP. if (!Subtarget->hasVFP2()) return false; @@ -1336,7 +1555,10 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) { return false; Value *Src = I->getOperand(0); - EVT SrcVT = TLI.getValueType(Src->getType(), true); + EVT SrcEVT = TLI.getValueType(Src->getType(), true); + if (!SrcEVT.isSimple()) + return false; + MVT SrcVT = SrcEVT.getSimpleVT(); if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) return false; @@ -1345,10 +1567,9 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) { // Handle sign-extension. if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { - EVT DestVT = MVT::i32; - unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false); - if (ResultReg == 0) return false; - SrcReg = ResultReg; + SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, + /*isZExt*/!isSigned); + if (SrcReg == 0) return false; } // The conversion routine works on fp-reg to fp-reg and the operand above @@ -1357,19 +1578,18 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) { if (FP == 0) return false; unsigned Opc; - if (Ty->isFloatTy()) Opc = ARM::VSITOS; - else if (Ty->isDoubleTy()) Opc = ARM::VSITOD; + if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; + else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; else return false; unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), - ResultReg) - .addReg(FP)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ResultReg).addReg(FP)); UpdateValueMap(I, ResultReg); return true; } -bool ARMFastISel::SelectFPToSI(const Instruction *I) { +bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { // Make sure we have VFP. if (!Subtarget->hasVFP2()) return false; @@ -1383,15 +1603,14 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) { unsigned Opc; Type *OpTy = I->getOperand(0)->getType(); - if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS; - else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD; + if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; + else if (OpTy->isDoubleTy()) Opc = isSigned ? 
ARM::VTOSIZD : ARM::VTOUIZD; else return false; - // f64->s32 or f32->s32 both need an intermediate f32 reg. + // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), - ResultReg) - .addReg(Op)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ResultReg).addReg(Op)); // This result needs to be in an integer register, but the conversion only // takes place in fp-regs. @@ -1409,28 +1628,76 @@ bool ARMFastISel::SelectSelect(const Instruction *I) { // Things need to be register sized for register moves. if (VT != MVT::i32) return false; - const TargetRegisterClass *RC = TLI.getRegClassFor(VT); unsigned CondReg = getRegForValue(I->getOperand(0)); if (CondReg == 0) return false; unsigned Op1Reg = getRegForValue(I->getOperand(1)); if (Op1Reg == 0) return false; - unsigned Op2Reg = getRegForValue(I->getOperand(2)); - if (Op2Reg == 0) return false; - unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) - .addReg(CondReg).addImm(1)); + // Check to see if we can use an immediate in the conditional move. + int Imm = 0; + bool UseImm = false; + bool isNegativeImm = false; + if (const ConstantInt *ConstInt = dyn_cast(I->getOperand(2))) { + assert (VT == MVT::i32 && "Expecting an i32."); + Imm = (int)ConstInt->getValue().getZExtValue(); + if (Imm < 0) { + isNegativeImm = true; + Imm = ~Imm; + } + UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : + (ARM_AM::getSOImmVal(Imm) != -1); + } + + unsigned Op2Reg = 0; + if (!UseImm) { + Op2Reg = getRegForValue(I->getOperand(2)); + if (Op2Reg == 0) return false; + } + + unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; + CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0); + AddOptionalDefs( + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) + .addReg(CondReg) + .addImm(0)); + + unsigned MovCCOpc; + const TargetRegisterClass *RC; + if (!UseImm) { + RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; + MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; + } else { + RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; + if (!isNegativeImm) + MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; + else + MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; + } unsigned ResultReg = createResultReg(RC); - unsigned MovCCOpc = isThumb ? 
ARM::t2MOVCCr : ARM::MOVCCr; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) - .addReg(Op1Reg).addReg(Op2Reg) - .addImm(ARMCC::EQ).addReg(ARM::CPSR); + if (!UseImm) { + Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1); + Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), + ResultReg) + .addReg(Op2Reg) + .addReg(Op1Reg) + .addImm(ARMCC::NE) + .addReg(ARM::CPSR); + } else { + Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), + ResultReg) + .addReg(Op1Reg) + .addImm(Imm) + .addImm(ARMCC::EQ) + .addReg(ARM::CPSR); + } UpdateValueMap(I, ResultReg); return true; } -bool ARMFastISel::SelectSDiv(const Instruction *I) { +bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { MVT VT; Type *Ty = I->getType(); if (!isTypeLegal(Ty, VT)) @@ -1444,21 +1711,21 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) { // Otherwise emit a libcall. RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (VT == MVT::i8) - LC = RTLIB::SDIV_I8; + LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; else if (VT == MVT::i16) - LC = RTLIB::SDIV_I16; + LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; else if (VT == MVT::i32) - LC = RTLIB::SDIV_I32; + LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; else if (VT == MVT::i64) - LC = RTLIB::SDIV_I64; + LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; else if (VT == MVT::i128) - LC = RTLIB::SDIV_I128; + LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); return ARMEmitLibcall(I, LC); } -bool ARMFastISel::SelectSRem(const Instruction *I) { +bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { MVT VT; Type *Ty = I->getType(); if (!isTypeLegal(Ty, VT)) @@ -1466,22 +1733,64 @@ bool ARMFastISel::SelectSRem(const Instruction *I) { RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (VT == MVT::i8) - LC = RTLIB::SREM_I8; + LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; else if (VT == MVT::i16) - LC = RTLIB::SREM_I16; + LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; else if (VT == MVT::i32) - LC = RTLIB::SREM_I32; + LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; else if (VT == MVT::i64) - LC = RTLIB::SREM_I64; + LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; else if (VT == MVT::i128) - LC = RTLIB::SREM_I128; + LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); return ARMEmitLibcall(I, LC); } -bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) { - EVT VT = TLI.getValueType(I->getType(), true); +bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { + EVT DestVT = TLI.getValueType(I->getType(), true); + + // We can get here in the case when we have a binary operation on a non-legal + // type and the target independent selector doesn't know how to handle it. + if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) + return false; + + unsigned Opc; + switch (ISDOpcode) { + default: return false; + case ISD::ADD: + Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; + break; + case ISD::OR: + Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; + break; + case ISD::SUB: + Opc = isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr; + break; + } + + unsigned SrcReg1 = getRegForValue(I->getOperand(0)); + if (SrcReg1 == 0) return false; + + // TODO: Often the 2nd operand is an immediate, which can be encoded directly + // in the instruction, rather then materializing the value in a register. + unsigned SrcReg2 = getRegForValue(I->getOperand(1)); + if (SrcReg2 == 0) return false; + + unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); + SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); + SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ResultReg) + .addReg(SrcReg1).addReg(SrcReg2)); + UpdateValueMap(I, ResultReg); + return true; +} + +bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { + EVT FPVT = TLI.getValueType(I->getType(), true); + if (!FPVT.isSimple()) return false; + MVT VT = FPVT.getSimpleVT(); // We can get here in the case when we want to use NEON for our fp // operations, but can't figure out how to. Just use the vfp instructions @@ -1492,12 +1801,6 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) { if (isFloat && !Subtarget->hasVFP2()) return false; - unsigned Op1 = getRegForValue(I->getOperand(0)); - if (Op1 == 0) return false; - - unsigned Op2 = getRegForValue(I->getOperand(1)); - if (Op2 == 0) return false; - unsigned Opc; bool is64bit = VT == MVT::f64 || VT == MVT::i64; switch (ISDOpcode) { @@ -1512,8 +1815,14 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) { Opc = is64bit ? ARM::VMULD : ARM::VMULS; break; } - unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + unsigned Op1 = getRegForValue(I->getOperand(0)); + if (Op1 == 0) return false; + + unsigned Op2 = getRegForValue(I->getOperand(1)); + if (Op2 == 0) return false; + + unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addReg(Op1).addReg(Op2)); UpdateValueMap(I, ResultReg); @@ -1522,46 +1831,46 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) { // Call Handling Code -bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, - EVT SrcVT, unsigned &ResultReg) { - unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, - Src, /*TODO: Kill=*/false); - - if (RR != 0) { - ResultReg = RR; - return true; - } else - return false; -} - -// This is largely taken directly from CCAssignFnForNode - we don't support -// varargs in FastISel so that part has been removed. +// This is largely taken directly from CCAssignFnForNode // TODO: We may not support all of this. -CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) { +CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, + bool Return, + bool isVarArg) { switch (CC) { default: llvm_unreachable("Unsupported calling convention"); case CallingConv::Fast: - // Ignore fastcc. Silence compiler warnings. - (void)RetFastCC_ARM_APCS; - (void)FastCC_ARM_APCS; + if (Subtarget->hasVFP2() && !isVarArg) { + if (!Subtarget->isAAPCS_ABI()) + return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); + // For AAPCS ABI targets, just use VFP variant of the calling convention. + return (Return ? 
RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); + } // Fallthrough case CallingConv::C: // Use target triple & subtarget features to do actual dispatch. if (Subtarget->isAAPCS_ABI()) { if (Subtarget->hasVFP2() && - FloatABIType == FloatABI::Hard) + TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); else return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); } else return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); case CallingConv::ARM_AAPCS_VFP: - return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); + if (!isVarArg) + return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); + // Fall through to soft float variant, variadic functions don't + // use hard floating point ABI. case CallingConv::ARM_AAPCS: return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); case CallingConv::ARM_APCS: return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); + case CallingConv::GHC: + if (Return) + llvm_unreachable("Can't return in GHC call convention"); + else + return CC_ARM_APCS_GHC; } } @@ -1571,17 +1880,61 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, SmallVectorImpl &ArgFlags, SmallVectorImpl &RegArgs, CallingConv::ID CC, - unsigned &NumBytes) { + unsigned &NumBytes, + bool isVarArg) { SmallVector ArgLocs; - CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context); - CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false)); + CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context); + CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, + CCAssignFnForCall(CC, false, isVarArg)); + + // Check that we can handle all of the arguments. If we can't, then bail out + // now before we add code to the MBB. + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + MVT ArgVT = ArgVTs[VA.getValNo()]; + + // We don't handle NEON/vector parameters yet. + if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) + return false; + + // Now copy/store arg to correct locations. + if (VA.isRegLoc() && !VA.needsCustom()) { + continue; + } else if (VA.needsCustom()) { + // TODO: We need custom lowering for vector (v2f64) args. + if (VA.getLocVT() != MVT::f64 || + // TODO: Only handle register args for now. + !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) + return false; + } else { + switch (ArgVT.SimpleTy) { + default: + return false; + case MVT::i1: + case MVT::i8: + case MVT::i16: + case MVT::i32: + break; + case MVT::f32: + if (!Subtarget->hasVFP2()) + return false; + break; + case MVT::f64: + if (!Subtarget->hasVFP2()) + return false; + break; + } + } + } + + // At the point, we are able to handle the call's arguments in fast isel. // Get a count of how many bytes are to be pushed on the stack. NumBytes = CCInfo.getNextStackOffset(); // Issue CALLSEQ_START unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) .addImm(NumBytes)); @@ -1591,29 +1944,26 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, unsigned Arg = ArgRegs[VA.getValNo()]; MVT ArgVT = ArgVTs[VA.getValNo()]; - // We don't handle NEON/vector parameters yet. - if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) - return false; + assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && + "We don't handle NEON/vector parameters yet."); // Handle arg promotion, etc. 
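// Illustrative aside (not part of the patch): what the SExt/ZExt promotion in
// the switch below does, modeled on plain integers. Sub-word arguments
// (i1/i8/i16) are widened to a full 32-bit register value before being assigned
// to r0-r3 or to a stack slot; the helper name is hypothetical.
#include <cstdint>
static uint32_t promoteToI32(uint32_t Val, unsigned SrcBits, bool isZExt) {
  uint32_t Mask = (SrcBits >= 32) ? ~0u : ((1u << SrcBits) - 1u);
  uint32_t Low = Val & Mask;              // keep only the defined low bits
  if (isZExt || SrcBits >= 32)
    return Low;                           // zero-extension: upper bits cleared
  uint32_t SignBit = 1u << (SrcBits - 1);
  return (Low ^ SignBit) - SignBit;       // sign-extension via the xor/sub trick
}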
switch (VA.getLocInfo()) { case CCValAssign::Full: break; case CCValAssign::SExt: { - EVT DestVT = VA.getLocVT(); - unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT, - /*isZExt*/false); - assert (ResultReg != 0 && "Failed to emit a sext"); - Arg = ResultReg; + MVT DestVT = VA.getLocVT(); + Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); + assert (Arg != 0 && "Failed to emit a sext"); + ArgVT = DestVT; break; } case CCValAssign::AExt: // Intentional fall-through. Handle AExt and ZExt. case CCValAssign::ZExt: { - EVT DestVT = VA.getLocVT(); - unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT, - /*isZExt*/true); - assert (ResultReg != 0 && "Failed to emit a sext"); - Arg = ResultReg; + MVT DestVT = VA.getLocVT(); + Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); + assert (Arg != 0 && "Failed to emit a zext"); + ArgVT = DestVT; break; } case CCValAssign::BCvt: { @@ -1629,20 +1979,20 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, // Now copy/store arg to correct locations. if (VA.isRegLoc() && !VA.needsCustom()) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), - VA.getLocReg()) - .addReg(Arg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); RegArgs.push_back(VA.getLocReg()); } else if (VA.needsCustom()) { // TODO: We need custom lowering for vector (v2f64) args. - if (VA.getLocVT() != MVT::f64) return false; + assert(VA.getLocVT() == MVT::f64 && + "Custom lowering for v2f64 args not available"); CCValAssign &NextVA = ArgLocs[++i]; - // TODO: Only handle register args for now. - if(!(VA.isRegLoc() && NextVA.isRegLoc())) return false; + assert(VA.isRegLoc() && NextVA.isRegLoc() && + "We only handle register args!"); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::VMOVRRD), VA.getLocReg()) .addReg(NextVA.getLocReg(), RegState::Define) .addReg(Arg)); @@ -1656,35 +2006,37 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, Addr.Base.Reg = ARM::SP; Addr.Offset = VA.getLocMemOffset(); - if (!ARMEmitStore(ArgVT, Arg, Addr)) return false; + bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; + assert(EmitRet && "Could not emit a store for argument!"); } } + return true; } bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs, const Instruction *I, CallingConv::ID CC, - unsigned &NumBytes) { + unsigned &NumBytes, bool isVarArg) { // Issue CALLSEQ_END unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) .addImm(NumBytes).addImm(0)); // Now the return value. if (RetVT != MVT::isVoid) { SmallVector RVLocs; - CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); - CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true)); + CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); + CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); // Copy all of the result registers out of their specified physreg. if (RVLocs.size() == 2 && RetVT == MVT::f64) { // For this move we copy into two registers and then move into the // double fp reg we want. 
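// Illustrative aside (not part of the patch): the effect of the VMOVDRR emitted
// just below, modeled in plain C++. Under the soft-float return convention an
// f64 comes back split across two core registers, and the two 32-bit halves are
// glued back into one 64-bit FP value. Which half is the low word depends on the
// target's endianness, so the argument order here is an assumption.
#include <cstdint>
#include <cstring>
static double combineF64Halves(uint32_t LoHalf, uint32_t HiHalf) {
  uint64_t Bits = ((uint64_t)HiHalf << 32) | LoHalf;
  double D;
  std::memcpy(&D, &Bits, sizeof(D));      // bit-cast without aliasing issues
  return D;
}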
- EVT DestVT = RVLocs[0].getValVT(); - TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); + MVT DestVT = RVLocs[0].getValVT(); + const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); unsigned ResultReg = createResultReg(DstRC); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(ARM::VMOVDRR), ResultReg) .addReg(RVLocs[0].getLocReg()) .addReg(RVLocs[1].getLocReg())); @@ -1696,11 +2048,17 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs, UpdateValueMap(I, ResultReg); } else { assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); - EVT CopyVT = RVLocs[0].getValVT(); - TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); + MVT CopyVT = RVLocs[0].getValVT(); + + // Special handling for extended integers. + if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) + CopyVT = MVT::i32; + + const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); unsigned ResultReg = createResultReg(DstRC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(RVLocs[0].getLocReg()); UsedRegs.push_back(RVLocs[0].getLocReg()); @@ -1719,19 +2077,19 @@ bool ARMFastISel::SelectRet(const Instruction *I) { if (!FuncInfo.CanLowerReturn) return false; - if (F.isVarArg()) - return false; + // Build a list of return value registers. + SmallVector RetRegs; CallingConv::ID CC = F.getCallingConv(); if (Ret->getNumOperands() > 0) { SmallVector Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), - Outs, TLI); + GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); // Analyze operands of the call, assigning locations to each operand. SmallVector ValLocs; CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); - CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */)); + CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, + F.isVarArg())); const Value *RV = Ret->getOperand(0); unsigned Reg = getRegForValue(RV); @@ -1752,22 +2110,23 @@ bool ARMFastISel::SelectRet(const Instruction *I) { return false; unsigned SrcReg = Reg + VA.getValNo(); - EVT RVVT = TLI.getValueType(RV->getType()); - EVT DestVT = VA.getValVT(); + EVT RVEVT = TLI.getValueType(RV->getType()); + if (!RVEVT.isSimple()) return false; + MVT RVVT = RVEVT.getSimpleVT(); + MVT DestVT = VA.getValVT(); // Special handling for extended integers. if (RVVT != DestVT) { if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) return false; - if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt()) - return false; - assert(DestVT == MVT::i32 && "ARM should always ext to i32"); - bool isZExt = Outs[0].Flags.isZExt(); - unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt); - if (ResultReg == 0) return false; - SrcReg = ResultReg; + // Perform extension if flagged as either zext or sext. Otherwise, do + // nothing. + if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { + SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); + if (SrcReg == 0) return false; + } } // Make the copy. @@ -1776,28 +2135,40 @@ bool ARMFastISel::SelectRet(const Instruction *I) { // Avoid a cross-class copy. This is very unlikely. 
if (!SrcRC->contains(DstReg)) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), - DstReg).addReg(SrcReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); - // Mark the register as live out of the function. - MRI.addLiveOut(VA.getLocReg()); + // Add register to return instruction. + RetRegs.push_back(VA.getLocReg()); } - unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(RetOpc))); + unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(RetOpc)); + AddOptionalDefs(MIB); + for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) + MIB.addReg(RetRegs[i], RegState::Implicit); return true; } -unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) { +unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { + if (UseReg) + return isThumb2 ? ARM::tBLXr : ARM::BLX; + else + return isThumb2 ? ARM::tBL : ARM::BL; +} - // Darwin needs the r9 versions of the opcodes. - bool isDarwin = Subtarget->isTargetDarwin(); - if (isThumb) { - return isDarwin ? ARM::tBLr9 : ARM::tBL; - } else { - return isDarwin ? ARM::BLr9 : ARM::BL; - } +unsigned ARMFastISel::getLibcallReg(const Twine &Name) { + // Manually compute the global's type to avoid building it when unnecessary. + Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); + EVT LCREVT = TLI.getValueType(GVTy); + if (!LCREVT.isSimple()) return 0; + + GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, + GlobalValue::ExternalLinkage, nullptr, + Name); + assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); + return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); } // A quick function that will emit a call for a named libcall in F with the @@ -1818,8 +2189,14 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { else if (!isTypeLegal(RetTy, RetVT)) return false; - // TODO: For now if we have long calls specified we don't handle the call. - if (EnableARMLongCalls) return false; + // Can't handle non-double multi-reg retvals. + if (RetVT != MVT::isVoid && RetVT != MVT::i32) { + SmallVector RVLocs; + CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); + CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); + if (RVLocs.size() >= 2 && RetVT != MVT::f64) + return false; + } // Set up the argument vectors. SmallVector Args; @@ -1840,7 +2217,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { if (!isTypeLegal(ArgTy, ArgVT)) return false; ISD::ArgFlagsTy Flags; - unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); + unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); Flags.setOrigAlign(OriginalAlignment); Args.push_back(Op); @@ -1852,31 +2229,39 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { // Handle the arguments now that we've gotten them. SmallVector RegArgs; unsigned NumBytes; - if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes)) + if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, + RegArgs, CC, NumBytes, false)) return false; - // Issue the call, BLr9 for darwin, BL otherwise. - // TODO: Turn this into the table of arm call ops. - MachineInstrBuilder MIB; - unsigned CallOpc = ARMSelectCallOp(NULL); - if(isThumb) - // Explicitly adding the predicate here. 
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(CallOpc))) - .addExternalSymbol(TLI.getLibcallName(Call)); + unsigned CalleeReg = 0; + if (EnableARMLongCalls) { + CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); + if (CalleeReg == 0) return false; + } + + // Issue the call. + unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DbgLoc, TII.get(CallOpc)); + // BL / BLX don't take a predicate, but tBL / tBLX do. + if (isThumb2) + AddDefaultPred(MIB); + if (EnableARMLongCalls) + MIB.addReg(CalleeReg); else - // Explicitly adding the predicate here. - MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(CallOpc)) - .addExternalSymbol(TLI.getLibcallName(Call))); + MIB.addExternalSymbol(TLI.getLibcallName(Call)); // Add implicit physical register uses to the call. for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) - MIB.addReg(RegArgs[i]); + MIB.addReg(RegArgs[i], RegState::Implicit); + + // Add a register mask with the call-preserved registers. + // Proper defs for return values will be added by setPhysRegsDeadExcept(). + MIB.addRegMask(TRI.getCallPreservedMask(CC)); // Finish off the call including any return values. SmallVector UsedRegs; - if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false; + if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; // Set all unused physreg defs as dead. static_cast(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); @@ -1884,17 +2269,16 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { return true; } -bool ARMFastISel::SelectCall(const Instruction *I) { +bool ARMFastISel::SelectCall(const Instruction *I, + const char *IntrMemName = nullptr) { const CallInst *CI = cast(I); const Value *Callee = CI->getCalledValue(); - // Can't handle inline asm or worry about intrinsics yet. - if (isa(Callee) || isa(CI)) return false; + // Can't handle inline asm. + if (isa(Callee)) return false; - // Only handle global variable Callees. - const GlobalValue *GV = dyn_cast(Callee); - if (!GV) - return false; + // Allow SelectionDAG isel to handle tail calls. + if (CI->isTailCall()) return false; // Check the calling convention. ImmutableCallSite CS(CI); @@ -1902,38 +2286,46 @@ bool ARMFastISel::SelectCall(const Instruction *I) { // TODO: Avoid some calling conventions? - // Let SDISel handle vararg functions. PointerType *PT = cast(CS.getCalledValue()->getType()); FunctionType *FTy = cast(PT->getElementType()); - if (FTy->isVarArg()) - return false; + bool isVarArg = FTy->isVarArg(); // Handle *simple* calls for now. Type *RetTy = I->getType(); MVT RetVT; if (RetTy->isVoidTy()) RetVT = MVT::isVoid; - else if (!isTypeLegal(RetTy, RetVT)) + else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && + RetVT != MVT::i8 && RetVT != MVT::i1) return false; - // TODO: For now if we have long calls specified we don't handle the call. - if (EnableARMLongCalls) return false; + // Can't handle non-double multi-reg retvals. + if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && + RetVT != MVT::i16 && RetVT != MVT::i32) { + SmallVector RVLocs; + CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); + CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); + if (RVLocs.size() >= 2 && RetVT != MVT::f64) + return false; + } // Set up the argument vectors. 
SmallVector Args; SmallVector ArgRegs; SmallVector ArgVTs; SmallVector ArgFlags; - Args.reserve(CS.arg_size()); - ArgRegs.reserve(CS.arg_size()); - ArgVTs.reserve(CS.arg_size()); - ArgFlags.reserve(CS.arg_size()); + unsigned arg_size = CS.arg_size(); + Args.reserve(arg_size); + ArgRegs.reserve(arg_size); + ArgVTs.reserve(arg_size); + ArgFlags.reserve(arg_size); for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) { - unsigned Arg = getRegForValue(*i); + // If we're lowering a memory intrinsic instead of a regular call, skip the + // last two arguments, which shouldn't be passed to the underlying function. + if (IntrMemName && e-i <= 2) + break; - if (Arg == 0) - return false; ISD::ArgFlagsTy Flags; unsigned AttrInd = i - CS.arg_begin() + 1; if (CS.paramHasAttr(AttrInd, Attribute::SExt)) @@ -1953,7 +2345,12 @@ bool ARMFastISel::SelectCall(const Instruction *I) { if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && ArgVT != MVT::i1) return false; - unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); + + unsigned Arg = getRegForValue(*i); + if (Arg == 0) + return false; + + unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); Flags.setOrigAlign(OriginalAlignment); Args.push_back(*i); @@ -1965,32 +2362,58 @@ bool ARMFastISel::SelectCall(const Instruction *I) { // Handle the arguments now that we've gotten them. SmallVector RegArgs; unsigned NumBytes; - if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes)) + if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, + RegArgs, CC, NumBytes, isVarArg)) return false; - // Issue the call, BLr9 for darwin, BL otherwise. - // TODO: Turn this into the table of arm call ops. - MachineInstrBuilder MIB; - unsigned CallOpc = ARMSelectCallOp(GV); - // Explicitly adding the predicate here. - if(isThumb) - // Explicitly adding the predicate here. - MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(CallOpc))) - .addGlobalAddress(GV, 0, 0); + bool UseReg = false; + const GlobalValue *GV = dyn_cast(Callee); + if (!GV || EnableARMLongCalls) UseReg = true; + + unsigned CalleeReg = 0; + if (UseReg) { + if (IntrMemName) + CalleeReg = getLibcallReg(IntrMemName); + else + CalleeReg = getRegForValue(Callee); + + if (CalleeReg == 0) return false; + } + + // Issue the call. + unsigned CallOpc = ARMSelectCallOp(UseReg); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DbgLoc, TII.get(CallOpc)); + + unsigned char OpFlags = 0; + + // Add MO_PLT for global address or external symbol in the PIC relocation + // model. + if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_) + OpFlags = ARMII::MO_PLT; + + // ARM calls don't take a predicate, but tBL / tBLX do. + if(isThumb2) + AddDefaultPred(MIB); + if (UseReg) + MIB.addReg(CalleeReg); + else if (!IntrMemName) + MIB.addGlobalAddress(GV, 0, OpFlags); else - // Explicitly adding the predicate here. - MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(CallOpc)) - .addGlobalAddress(GV, 0, 0)); + MIB.addExternalSymbol(IntrMemName, OpFlags); // Add implicit physical register uses to the call. for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) - MIB.addReg(RegArgs[i]); + MIB.addReg(RegArgs[i], RegState::Implicit); + + // Add a register mask with the call-preserved registers. + // Proper defs for return values will be added by setPhysRegsDeadExcept(). + MIB.addRegMask(TRI.getCallPreservedMask(CC)); // Finish off the call including any return values. 
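// Illustrative aside (not part of the patch): the callee-form decision made a
// few lines above, reduced to a predicate. A direct BL/tBL is only possible when
// the callee is a known GlobalValue and long calls are not requested; otherwise
// the address is materialized into a register and the call goes through
// BLX/tBLXr. The enum and function names here are hypothetical.
enum class CallForm { DirectBL, IndirectBLX };
static CallForm pickCallForm(bool CalleeIsGlobalValue, bool LongCallsEnabled) {
  if (!CalleeIsGlobalValue || LongCallsEnabled)
    return CallForm::IndirectBLX;   // mirrors "UseReg = true" above
  return CallForm::DirectBL;        // mirrors ARMSelectCallOp(false)
}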
SmallVector UsedRegs; - if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false; + if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) + return false; // Set all unused physreg defs as dead. static_cast(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); @@ -1998,8 +2421,151 @@ bool ARMFastISel::SelectCall(const Instruction *I) { return true; } +bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { + return Len <= 16; +} + +bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, + uint64_t Len, unsigned Alignment) { + // Make sure we don't bloat code by inlining very large memcpy's. + if (!ARMIsMemCpySmall(Len)) + return false; + + while (Len) { + MVT VT; + if (!Alignment || Alignment >= 4) { + if (Len >= 4) + VT = MVT::i32; + else if (Len >= 2) + VT = MVT::i16; + else { + assert (Len == 1 && "Expected a length of 1!"); + VT = MVT::i8; + } + } else { + // Bound based on alignment. + if (Len >= 2 && Alignment == 2) + VT = MVT::i16; + else { + VT = MVT::i8; + } + } + + bool RV; + unsigned ResultReg; + RV = ARMEmitLoad(VT, ResultReg, Src); + assert (RV == true && "Should be able to handle this load."); + RV = ARMEmitStore(VT, ResultReg, Dest); + assert (RV == true && "Should be able to handle this store."); + (void)RV; + + unsigned Size = VT.getSizeInBits()/8; + Len -= Size; + Dest.Offset += Size; + Src.Offset += Size; + } + + return true; +} + +bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { + // FIXME: Handle more intrinsics. + switch (I.getIntrinsicID()) { + default: return false; + case Intrinsic::frameaddress: { + MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); + MFI->setFrameAddressIsTaken(true); + + unsigned LdrOpc; + const TargetRegisterClass *RC; + if (isThumb2) { + LdrOpc = ARM::t2LDRi12; + RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; + } else { + LdrOpc = ARM::LDRi12; + RC = (const TargetRegisterClass*)&ARM::GPRRegClass; + } + + const ARMBaseRegisterInfo *RegInfo = + static_cast(TM.getRegisterInfo()); + unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); + unsigned SrcReg = FramePtr; + + // Recursively load frame address + // ldr r0 [fp] + // ldr r0 [r0] + // ldr r0 [r0] + // ... + unsigned DestReg; + unsigned Depth = cast(I.getOperand(0))->getZExtValue(); + while (Depth--) { + DestReg = createResultReg(RC); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(LdrOpc), DestReg) + .addReg(SrcReg).addImm(0)); + SrcReg = DestReg; + } + UpdateValueMap(&I, SrcReg); + return true; + } + case Intrinsic::memcpy: + case Intrinsic::memmove: { + const MemTransferInst &MTI = cast(I); + // Don't handle volatile. + if (MTI.isVolatile()) + return false; + + // Disable inlining for memmove before calls to ComputeAddress. Otherwise, + // we would emit dead code because we don't currently handle memmoves. + bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); + if (isa(MTI.getLength()) && isMemCpy) { + // Small memcpy's are common enough that we want to do them without a call + // if possible. 
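// Illustrative aside (not part of the patch): the chunking strategy of
// ARMTryEmitSmallMemCpy above, written as an ordinary copy loop. Word-aligned
// (or unknown/zero-alignment) copies proceed in 4-byte chunks, 2-byte-aligned
// copies in 2-byte chunks, and everything else goes byte by byte; the cutoff for
// inlining at all is ARMIsMemCpySmall's 16-byte limit.
#include <cstdint>
#include <cstring>
static void smallInlineCopy(uint8_t *Dst, const uint8_t *Src,
                            uint64_t Len, unsigned Alignment) {
  while (Len) {
    unsigned Chunk;
    if (Alignment == 0 || Alignment >= 4)
      Chunk = Len >= 4 ? 4 : (Len >= 2 ? 2 : 1);
    else if (Alignment == 2 && Len >= 2)
      Chunk = 2;
    else
      Chunk = 1;
    std::memcpy(Dst, Src, Chunk);          // stands in for the load/store pair
    Dst += Chunk; Src += Chunk; Len -= Chunk;
  }
}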
+ uint64_t Len = cast(MTI.getLength())->getZExtValue(); + if (ARMIsMemCpySmall(Len)) { + Address Dest, Src; + if (!ARMComputeAddress(MTI.getRawDest(), Dest) || + !ARMComputeAddress(MTI.getRawSource(), Src)) + return false; + unsigned Alignment = MTI.getAlignment(); + if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) + return true; + } + } + + if (!MTI.getLength()->getType()->isIntegerTy(32)) + return false; + + if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) + return false; + + const char *IntrMemName = isa(I) ? "memcpy" : "memmove"; + return SelectCall(&I, IntrMemName); + } + case Intrinsic::memset: { + const MemSetInst &MSI = cast(I); + // Don't handle volatile. + if (MSI.isVolatile()) + return false; + + if (!MSI.getLength()->getType()->isIntegerTy(32)) + return false; + + if (MSI.getDestAddressSpace() > 255) + return false; + + return SelectCall(&I, "memset"); + } + case Intrinsic::trap: { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get( + Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); + return true; + } + } +} + bool ARMFastISel::SelectTrunc(const Instruction *I) { - // The high bits for a type smaller than the register size are assumed to be + // The high bits for a type smaller than the register size are assumed to be // undefined. Value *Op = I->getOperand(0); @@ -2021,74 +2587,223 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) { return true; } -unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, +unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt) { if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) return 0; + if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) + return 0; - unsigned Opc; - bool isBoolZext = false; - if (!SrcVT.isSimple()) return 0; - switch (SrcVT.getSimpleVT().SimpleTy) { - default: return 0; - case MVT::i16: - if (!Subtarget->hasV6Ops()) return 0; - if (isZExt) - Opc = isThumb ? ARM::t2UXTH : ARM::UXTH; - else - Opc = isThumb ? ARM::t2SXTH : ARM::SXTH; - break; - case MVT::i8: - if (!Subtarget->hasV6Ops()) return 0; - if (isZExt) - Opc = isThumb ? ARM::t2UXTB : ARM::UXTB; - else - Opc = isThumb ? ARM::t2SXTB : ARM::SXTB; - break; - case MVT::i1: - if (isZExt) { - Opc = isThumb ? ARM::t2ANDri : ARM::ANDri; - isBoolZext = true; - break; + // Table of which combinations can be emitted as a single instruction, + // and which will require two. + static const uint8_t isSingleInstrTbl[3][2][2][2] = { + // ARM Thumb + // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops + // ext: s z s z s z s z + /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, + /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, + /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } + }; + + // Target registers for: + // - For ARM can never be PC. + // - For 16-bit Thumb are restricted to lower 8 registers. + // - For 32-bit Thumb are restricted to non-SP and non-PC. + static const TargetRegisterClass *RCTbl[2][2] = { + // Instructions: Two Single + /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, + /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } + }; + + // Table governing the instruction(s) to be emitted. + static const struct InstructionTable { + uint32_t Opc : 16; + uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. + uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. + uint32_t Imm : 8; // All instructions have either a shift or a mask. 
+ } IT[2][2][3][2] = { + { // Two instructions (first is left shift, second is in this table). + { // ARM Opc S Shift Imm + /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, + /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, + /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, + /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, + /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, + /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } + }, + { // Thumb Opc S Shift Imm + /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, + /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, + /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, + /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, + /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, + /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } + } + }, + { // Single instruction. + { // ARM Opc S Shift Imm + /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, + /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, + /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, + /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, + /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, + /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } + }, + { // Thumb Opc S Shift Imm + /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, + /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, + /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, + /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, + /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, + /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } + } } - return 0; + }; + + unsigned SrcBits = SrcVT.getSizeInBits(); + unsigned DestBits = DestVT.getSizeInBits(); + (void) DestBits; + assert((SrcBits < DestBits) && "can only extend to larger types"); + assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && + "other sizes unimplemented"); + assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && + "other sizes unimplemented"); + + bool hasV6Ops = Subtarget->hasV6Ops(); + unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} + assert((Bitness < 3) && "sanity-check table bounds"); + + bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; + const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; + const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; + unsigned Opc = ITP->Opc; + assert(ARM::KILL != Opc && "Invalid table entry"); + unsigned hasS = ITP->hasS; + ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; + assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && + "only MOVsi has shift operand addressing mode"); + unsigned Imm = ITP->Imm; + + // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). + bool setsCPSR = &ARM::tGPRRegClass == RC; + unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi; + unsigned ResultReg; + // MOVsi encodes shift and immediate in shift operand addressing mode. + // The following condition has the same value when emitting two + // instruction sequences: both are shifts. + bool ImmIsSO = (Shift != ARM_AM::no_shift); + + // Either one or two instructions are emitted. + // They're always of the form: + // dst = in OP imm + // CPSR is set only by 16-bit Thumb instructions. + // Predicate, if any, is AL. + // S bit, if available, is always 0. 
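// Illustrative aside (not part of the patch): the two-instruction fallback the
// tables above describe, modeled on plain integers. When no single sxtb/uxth
// style instruction is available, the value is shifted left so its top source
// bit reaches bit 31 and then shifted back logically (zext) or arithmetically
// (sext); the shift amounts 31/24/16 in the Imm column correspond to source
// widths of 1, 8 and 16 bits.
#include <cstdint>
static uint32_t extendViaShiftPair(uint32_t V, unsigned SrcBits, bool isZExt) {
  unsigned Sh = 32 - SrcBits;                   // 31, 24 or 16
  uint32_t Shifted = V << Sh;                   // first instruction: lsl #Sh
  if (isZExt)
    return Shifted >> Sh;                       // second: lsr #Sh
  return (uint32_t)((int32_t)Shifted >> Sh);    // second: asr #Sh
}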
+ // When two are emitted the first's result will feed as the second's input, + // that value is then dead. + unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; + for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { + ResultReg = createResultReg(RC); + bool isLsl = (0 == Instr) && !isSingleInstr; + unsigned Opcode = isLsl ? LSLOpc : Opc; + ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; + unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; + bool isKill = 1 == Instr; + MachineInstrBuilder MIB = BuildMI( + *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); + if (setsCPSR) + MIB.addReg(ARM::CPSR, RegState::Define); + SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); + AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); + if (hasS) + AddDefaultCC(MIB); + // Second instruction consumes the first's result. + SrcReg = ResultReg; } - unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); - MachineInstrBuilder MIB; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) - .addReg(SrcReg); - if (isBoolZext) - MIB.addImm(1); - else - MIB.addImm(0); - AddOptionalDefs(MIB); return ResultReg; } bool ARMFastISel::SelectIntExt(const Instruction *I) { // On ARM, in general, integer casts don't involve legal types; this code // handles promotable integers. - // FIXME: We could save an instruction in many cases by special-casing - // load instructions. Type *DestTy = I->getType(); Value *Src = I->getOperand(0); Type *SrcTy = Src->getType(); - EVT SrcVT, DestVT; - SrcVT = TLI.getValueType(SrcTy, true); - DestVT = TLI.getValueType(DestTy, true); - bool isZExt = isa(I); unsigned SrcReg = getRegForValue(Src); if (!SrcReg) return false; + EVT SrcEVT, DestEVT; + SrcEVT = TLI.getValueType(SrcTy, true); + DestEVT = TLI.getValueType(DestTy, true); + if (!SrcEVT.isSimple()) return false; + if (!DestEVT.isSimple()) return false; + + MVT SrcVT = SrcEVT.getSimpleVT(); + MVT DestVT = DestEVT.getSimpleVT(); unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); if (ResultReg == 0) return false; UpdateValueMap(I, ResultReg); return true; } +bool ARMFastISel::SelectShift(const Instruction *I, + ARM_AM::ShiftOpc ShiftTy) { + // We handle thumb2 mode by target independent selector + // or SelectionDAG ISel. + if (isThumb2) + return false; + + // Only handle i32 now. + EVT DestVT = TLI.getValueType(I->getType(), true); + if (DestVT != MVT::i32) + return false; + + unsigned Opc = ARM::MOVsr; + unsigned ShiftImm; + Value *Src2Value = I->getOperand(1); + if (const ConstantInt *CI = dyn_cast(Src2Value)) { + ShiftImm = CI->getZExtValue(); + + // Fall back to selection DAG isel if the shift amount + // is zero or greater than the width of the value type. 
+ if (ShiftImm == 0 || ShiftImm >=32) + return false; + + Opc = ARM::MOVsi; + } + + Value *Src1Value = I->getOperand(0); + unsigned Reg1 = getRegForValue(Src1Value); + if (Reg1 == 0) return false; + + unsigned Reg2 = 0; + if (Opc == ARM::MOVsr) { + Reg2 = getRegForValue(Src2Value); + if (Reg2 == 0) return false; + } + + unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); + if(ResultReg == 0) return false; + + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ResultReg) + .addReg(Reg1); + + if (Opc == ARM::MOVsi) + MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); + else if (Opc == ARM::MOVsr) { + MIB.addReg(Reg2); + MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); + } + + AddOptionalDefs(MIB); + UpdateValueMap(I, ResultReg); + return true; +} + // TODO: SoftFP support. bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { @@ -2099,6 +2814,8 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { return SelectStore(I); case Instruction::Br: return SelectBranch(I); + case Instruction::IndirectBr: + return SelectIndirectBr(I); case Instruction::ICmp: case Instruction::FCmp: return SelectCmp(I); @@ -2107,20 +2824,36 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { case Instruction::FPTrunc: return SelectFPTrunc(I); case Instruction::SIToFP: - return SelectSIToFP(I); + return SelectIToFP(I, /*isSigned*/ true); + case Instruction::UIToFP: + return SelectIToFP(I, /*isSigned*/ false); case Instruction::FPToSI: - return SelectFPToSI(I); + return SelectFPToI(I, /*isSigned*/ true); + case Instruction::FPToUI: + return SelectFPToI(I, /*isSigned*/ false); + case Instruction::Add: + return SelectBinaryIntOp(I, ISD::ADD); + case Instruction::Or: + return SelectBinaryIntOp(I, ISD::OR); + case Instruction::Sub: + return SelectBinaryIntOp(I, ISD::SUB); case Instruction::FAdd: - return SelectBinaryOp(I, ISD::FADD); + return SelectBinaryFPOp(I, ISD::FADD); case Instruction::FSub: - return SelectBinaryOp(I, ISD::FSUB); + return SelectBinaryFPOp(I, ISD::FSUB); case Instruction::FMul: - return SelectBinaryOp(I, ISD::FMUL); + return SelectBinaryFPOp(I, ISD::FMUL); case Instruction::SDiv: - return SelectSDiv(I); + return SelectDiv(I, /*isSigned*/ true); + case Instruction::UDiv: + return SelectDiv(I, /*isSigned*/ false); case Instruction::SRem: - return SelectSRem(I); + return SelectRem(I, /*isSigned*/ true); + case Instruction::URem: + return SelectRem(I, /*isSigned*/ false); case Instruction::Call: + if (const IntrinsicInst *II = dyn_cast(I)) + return SelectIntrinsicCall(*II); return SelectCall(I); case Instruction::Select: return SelectSelect(I); @@ -2131,21 +2864,218 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { case Instruction::ZExt: case Instruction::SExt: return SelectIntExt(I); + case Instruction::Shl: + return SelectShift(I, ARM_AM::lsl); + case Instruction::LShr: + return SelectShift(I, ARM_AM::lsr); + case Instruction::AShr: + return SelectShift(I, ARM_AM::asr); default: break; } return false; } +namespace { +// This table describes sign- and zero-extend instructions which can be +// folded into a preceding load. All of these extends have an immediate +// (sometimes a mask and sometimes a shift) that's applied after +// extension. +const struct FoldableLoadExtendsStruct { + uint16_t Opc[2]; // ARM, Thumb. 
+ uint8_t ExpectedImm; + uint8_t isZExt : 1; + uint8_t ExpectedVT : 7; +} FoldableLoadExtends[] = { + { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, + { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, + { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, + { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, + { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } +}; +} + +/// \brief The specified machine instr operand is a vreg, and that +/// vreg is being provided by the specified load instruction. If possible, +/// try to fold the load as an operand to the instruction, returning true if +/// successful. +bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) { + // Verify we have a legal type before going any further. + MVT VT; + if (!isLoadTypeLegal(LI->getType(), VT)) + return false; + + // Combine load followed by zero- or sign-extend. + // ldrb r1, [r0] ldrb r1, [r0] + // uxtb r2, r1 => + // mov r3, r2 mov r3, r1 + if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) + return false; + const uint64_t Imm = MI->getOperand(2).getImm(); + + bool Found = false; + bool isZExt; + for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); + i != e; ++i) { + if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && + (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && + MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { + Found = true; + isZExt = FoldableLoadExtends[i].isZExt; + } + } + if (!Found) return false; + + // See if we can handle this address. + Address Addr; + if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; + + unsigned ResultReg = MI->getOperand(0).getReg(); + if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) + return false; + MI->eraseFromParent(); + return true; +} + +unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, + unsigned Align, MVT VT) { + bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); + ARMConstantPoolConstant *CPV = + ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); + unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); + + unsigned Opc; + unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); + // Load value. + if (isThumb2) { + DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(ARM::t2LDRpci), DestReg1) + .addConstantPoolIndex(Idx)); + Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; + } else { + // The extra immediate is for addrmode2. + DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DbgLoc, TII.get(ARM::LDRcp), DestReg1) + .addConstantPoolIndex(Idx).addImm(0)); + Opc = UseGOTOFF ? 
ARM::ADDrr : ARM::LDRrs; + } + + unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); + if (GlobalBaseReg == 0) { + GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); + AFI->setGlobalBaseReg(GlobalBaseReg); + } + + unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); + DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0); + DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1); + GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DbgLoc, TII.get(Opc), DestReg2) + .addReg(DestReg1) + .addReg(GlobalBaseReg); + if (!UseGOTOFF) + MIB.addImm(0); + AddOptionalDefs(MIB); + + return DestReg2; +} + +bool ARMFastISel::FastLowerArguments() { + if (!FuncInfo.CanLowerReturn) + return false; + + const Function *F = FuncInfo.Fn; + if (F->isVarArg()) + return false; + + CallingConv::ID CC = F->getCallingConv(); + switch (CC) { + default: + return false; + case CallingConv::Fast: + case CallingConv::C: + case CallingConv::ARM_AAPCS_VFP: + case CallingConv::ARM_AAPCS: + case CallingConv::ARM_APCS: + break; + } + + // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments + // which are passed in r0 - r3. + unsigned Idx = 1; + for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); + I != E; ++I, ++Idx) { + if (Idx > 4) + return false; + + if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || + F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || + F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) + return false; + + Type *ArgTy = I->getType(); + if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) + return false; + + EVT ArgVT = TLI.getValueType(ArgTy); + if (!ArgVT.isSimple()) return false; + switch (ArgVT.getSimpleVT().SimpleTy) { + case MVT::i8: + case MVT::i16: + case MVT::i32: + break; + default: + return false; + } + } + + + static const uint16_t GPRArgRegs[] = { + ARM::R0, ARM::R1, ARM::R2, ARM::R3 + }; + + const TargetRegisterClass *RC = &ARM::rGPRRegClass; + Idx = 0; + for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); + I != E; ++I, ++Idx) { + unsigned SrcReg = GPRArgRegs[Idx]; + unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); + // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. + // Without this, EmitLiveInCopies may eliminate the livein if its only + // use is a bitcast (which isn't turned into an instruction). + unsigned ResultReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), + ResultReg).addReg(DstReg, getKillRegState(true)); + UpdateValueMap(I, ResultReg); + } + + return true; +} + namespace llvm { - llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) { - // Completely untested on non-darwin. + FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, + const TargetLibraryInfo *libInfo) { const TargetMachine &TM = funcInfo.MF->getTarget(); - // Darwin and thumb1 only for now. const ARMSubtarget *Subtarget = &TM.getSubtarget(); - if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() && - !DisableARMFastISel) - return new ARMFastISel(funcInfo); - return 0; + // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. 
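// Illustrative aside (not part of the patch): the screening FastLowerArguments
// above applies before mapping incoming arguments directly onto r0-r3. Only up
// to four plain scalar i8/i16/i32 arguments qualify; sret/byval/inreg
// attributes, varargs, and aggregate or vector types all fall back to the
// normal lowering. Names below are hypothetical.
static bool canFastLowerArgs(const unsigned *ArgBits, unsigned NumArgs,
                             bool IsVarArg) {
  if (IsVarArg || NumArgs > 4)
    return false;                       // only r0-r3, no stack arguments
  for (unsigned i = 0; i != NumArgs; ++i)
    if (ArgBits[i] != 8 && ArgBits[i] != 16 && ArgBits[i] != 32)
      return false;                     // scalar i8/i16/i32 only
  return true;
}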
+ bool UseFastISel = false; + UseFastISel |= Subtarget->isTargetMachO() && !Subtarget->isThumb1Only(); + UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb(); + UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb(); + + if (UseFastISel) { + // iOS always has a FP for backtracking, force other targets + // to keep their FP when doing FastISel. The emitted code is + // currently superior, and in cases like test-suite's lencod + // FastISel isn't quite correct when FP is eliminated. + TM.Options.NoFramePointerElim = true; + return new ARMFastISel(funcInfo, libInfo); + } + return nullptr; } }
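// Closing illustrative aside (not part of the patch): the shape of the fold that
// tryToFoldLoadIntoMI and the FoldableLoadExtends table above implement. A sign-
// or zero-extend fed by a matching narrow load collapses into a single extending
// load (ldrb/ldrsb for i8, ldrh/ldrsh for i16); the ANDri/t2ANDri rows with mask
// 255 are the i8 zero-extend spelled as a mask. Types and names here are
// hypothetical.
enum class ExtKind { SExt8, ZExt8, SExt16, ZExt16 };
static bool extendFoldsIntoLoad(ExtKind K, unsigned LoadBits) {
  switch (K) {
  case ExtKind::SExt8:
  case ExtKind::ZExt8:  return LoadBits == 8;    // becomes ldrsb / ldrb
  case ExtKind::SExt16:
  case ExtKind::ZExt16: return LoadBits == 16;   // becomes ldrsh / ldrh
  }
  return false;
}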