X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FARM%2FARMFastISel.cpp;h=f5c4605e3eab20d42cb8104f15ca315d0dd656cf;hb=61512ba251097888963a8f07a35605564bcfc537;hp=9dc2a153cecfbe99e29c1c134b90dc1ef128fede;hpb=6a880d6ba8e489fc85d18cfbc5f8f6187d438630;p=oota-llvm.git

diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 9dc2a153cec..f5c4605e3ea 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -14,6 +14,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "ARM.h"
+#include "ARMAddressingModes.h"
 #include "ARMBaseInstrInfo.h"
 #include "ARMCallingConv.h"
 #include "ARMRegisterInfo.h"
@@ -26,6 +27,7 @@
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Module.h"
+#include "llvm/Operator.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -33,7 +35,9 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -46,12 +50,37 @@ using namespace llvm;
 
 static cl::opt<bool>
-EnableARMFastISel("arm-fast-isel",
-                  cl::desc("Turn on experimental ARM fast-isel support"),
-                  cl::init(false), cl::Hidden);
+DisableARMFastISel("disable-arm-fast-isel",
+                   cl::desc("Turn off experimental ARM fast-isel support"),
+                   cl::init(false), cl::Hidden);
+
+extern cl::opt<bool> EnableARMLongCalls;
 
 namespace {
 
+  // All possible address modes, plus some.
+  typedef struct Address {
+    enum {
+      RegBase,
+      FrameIndexBase
+    } BaseType;
+
+    union {
+      unsigned Reg;
+      int FI;
+    } Base;
+
+    int Offset;
+    unsigned Scale;
+    unsigned PlusReg;
+
+    // Innocuous defaults for our address.
+    Address()
+     : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
+      Base.Reg = 0;
+    }
+  } Address;
+
 class ARMFastISel : public FastISel {
 
   /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
@@ -88,6 +117,11 @@ class ARMFastISel : public FastISel {
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill);
+    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+                                      const TargetRegisterClass *RC,
+                                      unsigned Op0, bool Op0IsKill,
+                                      unsigned Op1, bool Op1IsKill,
+                                      unsigned Op2, bool Op2IsKill);
     virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
@@ -96,14 +130,18 @@ class ARMFastISel : public FastISel {
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       const ConstantFP *FPImm);
-    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
-                                    const TargetRegisterClass *RC,
-                                    uint64_t Imm);
     virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm);
+    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
+                                    const TargetRegisterClass *RC,
+                                    uint64_t Imm);
+    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
+                                     const TargetRegisterClass *RC,
+                                     uint64_t Imm1, uint64_t Imm2);
+
     virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx);
@@ -117,54 +155,60 @@ class ARMFastISel : public FastISel {
 
     // Instruction selection routines.
   private:
-    virtual bool SelectLoad(const Instruction *I);
-    virtual bool SelectStore(const Instruction *I);
-    virtual bool SelectBranch(const Instruction *I);
-    virtual bool SelectCmp(const Instruction *I);
-    virtual bool SelectFPExt(const Instruction *I);
-    virtual bool SelectFPTrunc(const Instruction *I);
-    virtual bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
-    virtual bool SelectSIToFP(const Instruction *I);
-    virtual bool SelectFPToSI(const Instruction *I);
-    virtual bool SelectSDiv(const Instruction *I);
-    virtual bool SelectSRem(const Instruction *I);
-    virtual bool SelectCall(const Instruction *I);
-    virtual bool SelectSelect(const Instruction *I);
+    bool SelectLoad(const Instruction *I);
+    bool SelectStore(const Instruction *I);
+    bool SelectBranch(const Instruction *I);
+    bool SelectCmp(const Instruction *I);
+    bool SelectFPExt(const Instruction *I);
+    bool SelectFPTrunc(const Instruction *I);
+    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
+    bool SelectSIToFP(const Instruction *I);
+    bool SelectFPToSI(const Instruction *I);
+    bool SelectSDiv(const Instruction *I);
+    bool SelectSRem(const Instruction *I);
+    bool SelectCall(const Instruction *I);
+    bool SelectSelect(const Instruction *I);
+    bool SelectRet(const Instruction *I);
 
     // Utility routines.
   private:
-    bool isTypeLegal(const Type *Ty, EVT &VT);
-    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
-    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
-    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
-    bool ARMLoadAlloca(const Instruction *I, EVT VT);
-    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT);
-    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
+    bool isTypeLegal(const Type *Ty, MVT &VT);
+    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
+    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
+    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
+    bool ARMComputeAddress(const Value *Obj, Address &Addr);
+    void ARMSimplifyAddress(Address &Addr, EVT VT);
     unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
     unsigned ARMMaterializeInt(const Constant *C, EVT VT);
     unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
     unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
     unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
+    unsigned ARMSelectCallOp(const GlobalValue *GV);
 
     // Call handling routines.
   private:
+    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
+                        unsigned &ResultReg);
     CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
-    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
+    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                          SmallVectorImpl<unsigned> &ArgRegs,
-                         SmallVectorImpl<EVT> &ArgVTs,
+                         SmallVectorImpl<MVT> &ArgVTs,
                          SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                          SmallVectorImpl<unsigned> &RegArgs,
                          CallingConv::ID CC,
                          unsigned &NumBytes);
-    bool FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                     const Instruction *I, CallingConv::ID CC,
                     unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
 
    // OptionalDef handling routines.
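A note on the predication plumbing these OptionalDef routines exist for: nearly every ARM instruction carries a trailing condition-code operand plus an optional CPSR-use register, and FastISel builds MachineInstrs by hand, so every emitted instruction has to remember to append them. A minimal sketch of the pattern AddOptionalDefs wraps, using the real AddDefaultPred helper from ARMBaseInstrInfo.h; DestReg and BaseReg are placeholders:

    // Hand-built load, as in ARMEmitLoad below: base register and immediate
    // first, then the "always" predicate, which AddDefaultPred appends as
    // .addImm(ARMCC::AL).addReg(0).
    MachineInstrBuilder MIB =
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                DestReg).addReg(BaseReg).addImm(0);
    AddDefaultPred(MIB);

Instructions with the optional S bit additionally take a CPSR def operand, which is what DefinesOptionalPredicate below deals with.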
private: + bool isARMNEONPred(const MachineInstr *MI); bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); + void AddLoadStoreOperands(EVT VT, Address &Addr, + const MachineInstrBuilder &MIB); }; } // end anonymous namespace @@ -189,14 +233,34 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { return true; } +bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { + const TargetInstrDesc &TID = MI->getDesc(); + + // If we're a thumb2 or not NEON function we were handled via isPredicable. + if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || + AFI->isThumb2Function()) + return false; + + for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) + if (TID.OpInfo[i].isPredicate()) + return true; + + return false; +} + // If the machine is predicable go ahead and add the predicate operands, if // it needs default CC operands add those. +// TODO: If we want to support thumb1 then we'll need to deal with optional +// CPSR defs that need to be added before the remaining operands. See s_cc_out +// for descriptions why. const MachineInstrBuilder & ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { MachineInstr *MI = &*MIB; - // Do we use a predicate? - if (TII.isPredicable(MI)) + // Do we use a predicate? or... + // Are we NEON in ARM mode and have a predicate operand? If so, I know + // we're not predicable but add it anyways. + if (TII.isPredicable(MI) || isARMNEONPred(MI)) AddDefaultPred(MIB); // Do we optionally set a predicate? Preds is size > 0 iff the predicate @@ -261,6 +325,31 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, return ResultReg; } +unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill, + unsigned Op2, bool Op2IsKill) { + unsigned ResultReg = createResultReg(RC); + const TargetInstrDesc &II = TII.get(MachineInstOpcode); + + if (II.getNumDefs() >= 1) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op1, Op1IsKill * RegState::Kill) + .addReg(Op2, Op2IsKill * RegState::Kill)); + else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op1, Op1IsKill * RegState::Kill) + .addReg(Op2, Op2IsKill * RegState::Kill)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::COPY), ResultReg) + .addReg(II.ImplicitDefs[0])); + } + return ResultReg; +} + unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, @@ -349,6 +438,26 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, return ResultReg; } +unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + uint64_t Imm1, uint64_t Imm2) { + unsigned ResultReg = createResultReg(RC); + const TargetInstrDesc &II = TII.get(MachineInstOpcode); + + if (II.getNumDefs() >= 1) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) + .addImm(Imm1).addImm(Imm2)); + else { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addImm(Imm1).addImm(Imm2)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::COPY), + ResultReg) + .addReg(II.ImplicitDefs[0])); + } + return ResultReg; +} + unsigned 
ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx) { @@ -364,8 +473,8 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, // TODO: Don't worry about 64-bit now, but when this is fixed remove the // checks from the various callers. unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { - if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0; - + if (VT == MVT::f64) return 0; + unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::VMOVRS), MoveReg) @@ -374,8 +483,8 @@ unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { } unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { - if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0; - + if (VT == MVT::i64) return 0; + unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::VMOVSR), MoveReg) @@ -388,7 +497,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { // the combined constant into an FP reg. unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { const APFloat Val = CFP->getValueAPF(); - bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64; + bool is64bit = VT == MVT::f64; // This checks to see if we can use VFP3 instructions to materialize // a constant, otherwise we have to go through the constant pool. @@ -400,10 +509,10 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { .addFPImm(CFP)); return DestReg; } - + // Require VFP2 for loading fp constants. if (!Subtarget->hasVFP2()) return false; - + // MachineConstantPool wants an explicit alignment. unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); if (Align == 0) { @@ -413,7 +522,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { unsigned Idx = MCP.getConstantPoolIndex(cast(CFP), Align); unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; - + // The extra reg is for addrmode5. AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) @@ -423,10 +532,23 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { } unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { - + // For now 32-bit only. - if (VT.getSimpleVT().SimpleTy != MVT::i32) return false; - + if (VT != MVT::i32) return false; + + unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); + + // If we can do this in a single instruction without a constant pool entry + // do so now. + const ConstantInt *CI = cast(C); + if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) { + unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), DestReg) + .addImm(CI->getSExtValue())); + return DestReg; + } + // MachineConstantPool wants an explicit alignment. unsigned Align = TD.getPrefTypeAlignment(C->getType()); if (Align == 0) { @@ -434,48 +556,47 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { Align = TD.getTypeAllocSize(C->getType()); } unsigned Idx = MCP.getConstantPoolIndex(C, Align); - unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); - + if (isThumb) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::t2LDRpci), DestReg) .addConstantPoolIndex(Idx)); else - // The extra reg and immediate are for addrmode2. + // The extra immediate is for addrmode2. 
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), DestReg) .addConstantPoolIndex(Idx) - .addReg(0).addImm(0)); + .addImm(0)); return DestReg; } unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { // For now 32-bit only. - if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0; - + if (VT != MVT::i32) return 0; + Reloc::Model RelocM = TM.getRelocationModel(); - + // TODO: No external globals for now. if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0; - + // TODO: Need more magic for ARM PIC. if (!isThumb && (RelocM == Reloc::PIC_)) return 0; - + // MachineConstantPool wants an explicit alignment. unsigned Align = TD.getPrefTypeAlignment(GV->getType()); if (Align == 0) { // TODO: Figure out if this is correct. Align = TD.getTypeAllocSize(GV->getType()); } - + // Grab index. unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8); - unsigned Id = AFI->createConstPoolEntryUId(); + unsigned Id = AFI->createPICLabelUId(); ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id, ARMCP::CPValue, PCAdj); unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); - + // Load value. MachineInstrBuilder MIB; unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); @@ -486,11 +607,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { if (RelocM == Reloc::PIC_) MIB.addImm(Id); } else { - // The extra reg and immediate are for addrmode2. + // The extra immediate is for addrmode2. MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), DestReg) .addConstantPoolIndex(Idx) - .addReg(0).addImm(0); + .addImm(0); } AddOptionalDefs(MIB); return DestReg; @@ -508,17 +629,17 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { return ARMMaterializeGV(GV, VT); else if (isa(C)) return ARMMaterializeInt(C, VT); - + return 0; } unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { // Don't handle dynamic allocas. if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; - - EVT VT; - if (!isTypeLegal(AI->getType(), VT)) return false; - + + MVT VT; + if (!isLoadTypeLegal(AI->getType(), VT)) return false; + DenseMap::iterator SI = FuncInfo.StaticAllocaMap.find(AI); @@ -534,22 +655,23 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { .addImm(0)); return ResultReg; } - + return 0; } -bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) { - VT = TLI.getValueType(Ty, true); +bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) { + EVT evt = TLI.getValueType(Ty, true); // Only handle simple types. - if (VT == MVT::Other || !VT.isSimple()) return false; + if (evt == MVT::Other || !evt.isSimple()) return false; + VT = evt.getSimpleVT(); // Handle all legal types, i.e. a register that will directly hold this // value. return TLI.isTypeLegal(VT); } -bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) { +bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) { if (isTypeLegal(Ty, VT)) return true; // If this is a type than can be sign or zero-extended to a basic operation @@ -560,20 +682,19 @@ bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) { return false; } -// Computes the Reg+Offset to get to an object. -bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg, - int &Offset) { +// Computes the address to get to an object. +bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { // Some boilerplate from the X86 FastISel. 
   const User *U = NULL;
   unsigned Opcode = Instruction::UserOp1;
   if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
-    // Don't walk into other basic blocks; it's possible we haven't
-    // visited them yet, so the instructions may not yet be assigned
-    // virtual registers.
-    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
-      return false;
-    Opcode = I->getOpcode();
-    U = I;
+    // Don't walk into other basic blocks unless the object is an alloca from
+    // another block, otherwise it may not have a virtual register assigned.
+    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+      Opcode = I->getOpcode();
+      U = I;
+    }
   } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
     Opcode = C->getOpcode();
     U = C;
@@ -588,208 +709,283 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
 
   switch (Opcode) {
     default:
     break;
+    case Instruction::BitCast: {
+      // Look through bitcasts.
+      return ARMComputeAddress(U->getOperand(0), Addr);
+    }
+    case Instruction::IntToPtr: {
+      // Look past no-op inttoptrs.
+      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+        return ARMComputeAddress(U->getOperand(0), Addr);
+      break;
+    }
+    case Instruction::PtrToInt: {
+      // Look past no-op ptrtoints.
+      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+        return ARMComputeAddress(U->getOperand(0), Addr);
+      break;
+    }
+    case Instruction::GetElementPtr: {
+      Address SavedAddr = Addr;
+      int TmpOffset = Addr.Offset;
+
+      // Iterate through the GEP folding the constants into offsets where
+      // we can.
+      gep_type_iterator GTI = gep_type_begin(U);
+      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
+           i != e; ++i, ++GTI) {
+        const Value *Op = *i;
+        if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+          const StructLayout *SL = TD.getStructLayout(STy);
+          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+          TmpOffset += SL->getElementOffset(Idx);
+        } else {
+          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+          for (;;) {
+            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+              // Constant-offset addressing.
+              TmpOffset += CI->getSExtValue() * S;
+              break;
+            }
+            if (isa<AddOperator>(Op) &&
+                (!isa<Instruction>(Op) ||
+                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
+                 == FuncInfo.MBB) &&
+                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+              // An add (in the same block) with a constant operand. Fold the
+              // constant.
+              ConstantInt *CI =
+                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+              TmpOffset += CI->getSExtValue() * S;
+              // Iterate on the other operand.
+              Op = cast<AddOperator>(Op)->getOperand(0);
+              continue;
+            }
+            // Unsupported
+            goto unsupported_gep;
+          }
+        }
+      }
+
+      // Try to grab the base operand now.
+      Addr.Offset = TmpOffset;
+      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
+
+      // We failed, restore everything and try the other options.
+      Addr = SavedAddr;
+
+      unsupported_gep:
+      break;
+    }
     case Instruction::Alloca: {
-      assert(false && "Alloca should have been handled earlier!");
-      return false;
+      const AllocaInst *AI = cast<AllocaInst>(Obj);
+      DenseMap<const AllocaInst*, int>::iterator SI =
+        FuncInfo.StaticAllocaMap.find(AI);
+      if (SI != FuncInfo.StaticAllocaMap.end()) {
+        Addr.BaseType = Address::FrameIndexBase;
+        Addr.Base.FI = SI->second;
+        return true;
+      }
+      break;
     }
   }
 
-  // FIXME: Handle global variables.
+  // Materialize the global variable's address into a reg which can
+  // then be used later to load the variable.
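Stepping back: the GetElementPtr case above is the classic FastISel address fold, accumulating every constant index into TmpOffset and bailing to unsupported_gep the moment anything is dynamic. A hypothetical trace (struct type and indices invented for illustration) for a GEP into {i32, [4 x i32]} with constant indices 0, 1, 2:

    int TmpOffset = 0;
    TmpOffset += 0 * 20; // index 0 over the pointer: 0 * sizeof(struct)
    TmpOffset += 4;      // StructLayout::getElementOffset(1): second field
    TmpOffset += 2 * 4;  // array index 2 times getTypeAllocSize(i32)
    // Net effect: Addr.Base.Reg = the base pointer's vreg, Addr.Offset = 12,
    // so the eventual access can use a single reg+imm addressing mode.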
if (const GlobalValue *GV = dyn_cast(Obj)) { - (void)GV; - return false; + unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType())); + if (Tmp == 0) return false; + + Addr.Base.Reg = Tmp; + return true; } // Try to get this in a register if nothing else has worked. - Reg = getRegForValue(Obj); - if (Reg == 0) return false; + if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); + return Addr.Base.Reg != 0; +} + +void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) { + + assert(VT.isSimple() && "Non-simple types are invalid here!"); + + bool needsLowering = false; + switch (VT.getSimpleVT().SimpleTy) { + default: + assert(false && "Unhandled load/store type!"); + case MVT::i1: + case MVT::i8: + case MVT::i16: + case MVT::i32: + // Integer loads/stores handle 12-bit offsets. + needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); + break; + case MVT::f32: + case MVT::f64: + // Floating point operands handle 8-bit offsets. + needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); + break; + } - // Since the offset may be too large for the load instruction + // If this is a stack pointer and the offset needs to be simplified then + // put the alloca address into a register, set the base type back to + // register and continue. This should almost never happen. + if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { + TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass : + ARM::GPRRegisterClass; + unsigned ResultReg = createResultReg(RC); + unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg) + .addFrameIndex(Addr.Base.FI) + .addImm(0)); + Addr.Base.Reg = ResultReg; + Addr.BaseType = Address::RegBase; + } + + // Since the offset is too large for the load/store instruction // get the reg+offset into a register. - // TODO: Verify the additions work, otherwise we'll need to add the - // offset instead of 0 to the instructions and do all sorts of operand - // munging. - // TODO: Optimize this somewhat. - if (Offset != 0) { - ARMCC::CondCodes Pred = ARMCC::AL; - unsigned PredReg = 0; - - if (!isThumb) - emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - Reg, Reg, Offset, Pred, PredReg, - static_cast(TII)); - else { - assert(AFI->isThumb2Function()); - emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - Reg, Reg, Offset, Pred, PredReg, - static_cast(TII)); - } + if (needsLowering) { + Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, + /*Op0IsKill*/false, Addr.Offset, MVT::i32); + Addr.Offset = 0; } - return true; } -bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) { - Value *Op0 = I->getOperand(0); - - // Promote load/store types. - if (VT == MVT::i8 || VT == MVT::i16) VT = MVT::i32; +void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, + const MachineInstrBuilder &MIB) { + // addrmode5 output depends on the selection dag addressing dividing the + // offset by 4 that it then later multiplies. Do this here as well. + if (VT.getSimpleVT().SimpleTy == MVT::f32 || + VT.getSimpleVT().SimpleTy == MVT::f64) + Addr.Offset /= 4; + + // Frame base works a bit differently. Handle it separately. 
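Before the frame-index special case below, a note on the magic constants in ARMSimplifyAddress and the Offset /= 4 just above: ARM integer loads and stores encode a 12-bit unsigned immediate, while the VFP forms use addrmode5, an 8-bit immediate the hardware scales by 4. The masks accept only non-negative offsets, which is all the GEP folding produces. The same test, written out as a sketch under that assumption:

    static bool offsetFits(int Offset, unsigned Bits) {
      return (Offset & ((1 << Bits) - 1)) == Offset;
    }
    // offsetFits(4092, 12) -> true:  LDRi12/t2LDRi12 take it directly.
    // offsetFits(4096, 12) -> false: base+offset gets folded into a vreg.
    // offsetFits(252, 8)   -> true for VLDR/VSTR; checking the byte offset
    // against 8 bits is conservative next to the hardware's 255*4 reach.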
+ if (Addr.BaseType == Address::FrameIndexBase) { + int FI = Addr.Base.FI; + int Offset = Addr.Offset; + MachineMemOperand *MMO = + FuncInfo.MF->getMachineMemOperand( + MachinePointerInfo::getFixedStack(FI, Offset), + MachineMemOperand::MOLoad, + MFI.getObjectSize(FI), + MFI.getObjectAlignment(FI)); + // Now add the rest of the operands. + MIB.addFrameIndex(FI); + + // ARM halfword load/stores need an additional operand. + if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0); + + MIB.addImm(Addr.Offset); + MIB.addMemOperand(MMO); + } else { + // Now add the rest of the operands. + MIB.addReg(Addr.Base.Reg); - // Verify it's an alloca. - if (const AllocaInst *AI = dyn_cast(Op0)) { - DenseMap::iterator SI = - FuncInfo.StaticAllocaMap.find(AI); + // ARM halfword load/stores need an additional operand. + if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0); - if (SI != FuncInfo.StaticAllocaMap.end()) { - TargetRegisterClass* RC = TLI.getRegClassFor(VT); - unsigned ResultReg = createResultReg(RC); - TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt, - ResultReg, SI->second, RC, - TM.getRegisterInfo()); - UpdateValueMap(I, ResultReg); - return true; - } + MIB.addImm(Addr.Offset); } - return false; + AddOptionalDefs(MIB); } -bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, - unsigned Reg, int Offset) { +bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) { assert(VT.isSimple() && "Non-simple types are invalid here!"); unsigned Opc; TargetRegisterClass *RC; - bool isFloat = false; switch (VT.getSimpleVT().SimpleTy) { - default: - // This is mostly going to be Neon/vector support. - return false; + // This is mostly going to be Neon/vector support. + default: return false; case MVT::i16: - Opc = isThumb ? ARM::t2LDRHi8 : ARM::LDRH; + Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH; RC = ARM::GPRRegisterClass; - VT = MVT::i32; break; case MVT::i8: - Opc = isThumb ? ARM::t2LDRBi8 : ARM::LDRB; + Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12; RC = ARM::GPRRegisterClass; - VT = MVT::i32; break; case MVT::i32: - Opc = isThumb ? ARM::t2LDRi8 : ARM::LDR; + Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12; RC = ARM::GPRRegisterClass; break; case MVT::f32: Opc = ARM::VLDRS; RC = TLI.getRegClassFor(VT); - isFloat = true; break; case MVT::f64: Opc = ARM::VLDRD; RC = TLI.getRegClassFor(VT); - isFloat = true; break; } + // Simplify this down to something we can handle. + ARMSimplifyAddress(Addr, VT); + // Create the base instruction, then add the operands. ResultReg = createResultReg(RC); - - // For now with the additions above the offset should be zero - thus we - // can always fit into an i8. - assert(Offset == 0 && "Offset not zero!"); - - // The thumb and floating point instructions both take 2 operands, ARM takes - // another register. - if (isFloat || isThumb) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(Opc), ResultReg) - .addReg(Reg).addImm(Offset)); - else - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(Opc), ResultReg) - .addReg(Reg).addReg(0).addImm(Offset)); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg); + AddLoadStoreOperands(VT, Addr, MIB); return true; } bool ARMFastISel::SelectLoad(const Instruction *I) { // Verify we have a legal type before going any further. 
- EVT VT; + MVT VT; if (!isLoadTypeLegal(I->getType(), VT)) return false; - // If we're an alloca we know we have a frame index and can emit the load - // directly in short order. - if (ARMLoadAlloca(I, VT)) - return true; - - // Our register and offset with innocuous defaults. - unsigned Reg = 0; - int Offset = 0; - - // See if we can handle this as Reg + Offset - if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset)) - return false; + // See if we can handle this address. + Address Addr; + if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; unsigned ResultReg; - if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false; - + if (!ARMEmitLoad(VT, ResultReg, Addr)) return false; UpdateValueMap(I, ResultReg); return true; } -bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT){ - Value *Op1 = I->getOperand(1); - - // Promote load/store types. - if (VT == MVT::i8 || VT == MVT::i16) VT = MVT::i32; - - // Verify it's an alloca. - if (const AllocaInst *AI = dyn_cast(Op1)) { - DenseMap::iterator SI = - FuncInfo.StaticAllocaMap.find(AI); - - if (SI != FuncInfo.StaticAllocaMap.end()) { - TargetRegisterClass* RC = TLI.getRegClassFor(VT); - assert(SrcReg != 0 && "Nothing to store!"); - TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt, - SrcReg, true /*isKill*/, SI->second, RC, - TM.getRegisterInfo()); - return true; - } - } - return false; -} - -bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, - unsigned DstReg, int Offset) { +bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) { unsigned StrOpc; - bool isFloat = false; switch (VT.getSimpleVT().SimpleTy) { + // This is mostly going to be Neon/vector support. default: return false; - case MVT::i1: - case MVT::i8: StrOpc = isThumb ? ARM::t2STRBi8 : ARM::STRB; break; - case MVT::i16: StrOpc = isThumb ? ARM::t2STRHi8 : ARM::STRH; break; - case MVT::i32: StrOpc = isThumb ? ARM::t2STRi8 : ARM::STR; break; + case MVT::i1: { + unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass : + ARM::GPRRegisterClass); + unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), Res) + .addReg(SrcReg).addImm(1)); + SrcReg = Res; + } // Fallthrough here. + case MVT::i8: + StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12; + break; + case MVT::i16: + StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH; + break; + case MVT::i32: + StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12; + break; case MVT::f32: if (!Subtarget->hasVFP2()) return false; StrOpc = ARM::VSTRS; - isFloat = true; break; case MVT::f64: if (!Subtarget->hasVFP2()) return false; StrOpc = ARM::VSTRD; - isFloat = true; break; } - - // The thumb addressing mode has operands swapped from the arm addressing - // mode, the floating point one only has two operands. - if (isFloat || isThumb) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(StrOpc)) - .addReg(SrcReg).addReg(DstReg).addImm(Offset)); - else - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(StrOpc)) - .addReg(SrcReg).addReg(DstReg).addReg(0).addImm(Offset)); - + // Simplify this down to something we can handle. + ARMSimplifyAddress(Addr, VT); + + // Create the base instruction, then add the operands. 
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(StrOpc)) + .addReg(SrcReg, getKillRegState(true)); + AddLoadStoreOperands(VT, Addr, MIB); return true; } @@ -797,31 +993,21 @@ bool ARMFastISel::SelectStore(const Instruction *I) { Value *Op0 = I->getOperand(0); unsigned SrcReg = 0; - // Yay type legalization - EVT VT; + // Verify we have a legal type before going any further. + MVT VT; if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) return false; // Get the value to be stored into a register. SrcReg = getRegForValue(Op0); - if (SrcReg == 0) - return false; - - // If we're an alloca we know we have a frame index and can emit the store - // quickly. - if (ARMStoreAlloca(I, SrcReg, VT)) - return true; - - // Our register and offset with innocuous defaults. - unsigned Reg = 0; - int Offset = 0; + if (SrcReg == 0) return false; - // See if we can handle this as Reg + Offset - if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset)) + // See if we can handle this address. + Address Addr; + if (!ARMComputeAddress(I->getOperand(1), Addr)) return false; - if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false; - + if (!ARMEmitStore(VT, SrcReg, Addr)) return false; return true; } @@ -829,9 +1015,9 @@ static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { switch (Pred) { // Needs two compares... case CmpInst::FCMP_ONE: - case CmpInst::FCMP_UEQ: + case CmpInst::FCMP_UEQ: default: - assert(false && "Unhandled CmpInst::Predicate!"); + // AL is our "false" for now. The other two need more compares. return ARMCC::AL; case CmpInst::ICMP_EQ: case CmpInst::FCMP_OEQ: @@ -858,7 +1044,7 @@ static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { return ARMCC::PL; case CmpInst::ICMP_SLT: case CmpInst::FCMP_ULT: - return ARMCC::LT; + return ARMCC::LT; case CmpInst::ICMP_SLE: case CmpInst::FCMP_ULE: return ARMCC::LE; @@ -878,27 +1064,128 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; // Simple branch support. - // TODO: Try to avoid the re-computation in some places. - unsigned CondReg = getRegForValue(BI->getCondition()); - if (CondReg == 0) return false; - // Re-set the flags just in case. - unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri; - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) - .addReg(CondReg).addImm(1)); - + // If we can, avoid recomputing the compare - redoing it could lead to wonky + // behavior. + // TODO: Factor this out. + if (const CmpInst *CI = dyn_cast(BI->getCondition())) { + MVT SourceVT; + const Type *Ty = CI->getOperand(0)->getType(); + if (CI->hasOneUse() && (CI->getParent() == I->getParent()) + && isTypeLegal(Ty, SourceVT)) { + bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); + if (isFloat && !Subtarget->hasVFP2()) + return false; + + unsigned CmpOpc; + switch (SourceVT.SimpleTy) { + default: return false; + // TODO: Verify compares. + case MVT::f32: + CmpOpc = ARM::VCMPES; + break; + case MVT::f64: + CmpOpc = ARM::VCMPED; + break; + case MVT::i32: + CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr; + break; + } + + // Get the compare predicate. + // Try to take advantage of fallthrough opportunities. + CmpInst::Predicate Predicate = CI->getPredicate(); + if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { + std::swap(TBB, FBB); + Predicate = CmpInst::getInversePredicate(Predicate); + } + + ARMCC::CondCodes ARMPred = getComparePred(Predicate); + + // We may not handle every CC for now. 
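One subtlety worth flagging in this rewritten branch lowering: when the true block is also the layout successor, branching to it would cost an extra unconditional branch for the false edge, so the code swaps the targets and inverts the IR predicate before mapping it to an ARM condition. Roughly, for br (icmp slt %a, %b), %bb1, %bb2 with %bb1 laid out next:

    std::swap(TBB, FBB);                                  // branch to %bb2...
    Predicate = CmpInst::getInversePredicate(Predicate);  // ...on sge instead
    // getComparePred maps signed-greater-or-equal to ARMCC::GE, so the
    // emitted sequence is a cmp followed by "bge %bb2", and execution
    // falls through into %bb1.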
+ if (ARMPred == ARMCC::AL) return false; + + unsigned Arg1 = getRegForValue(CI->getOperand(0)); + if (Arg1 == 0) return false; + + unsigned Arg2 = getRegForValue(CI->getOperand(1)); + if (Arg2 == 0) return false; + + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(CmpOpc)) + .addReg(Arg1).addReg(Arg2)); + + // For floating point we need to move the result to a comparison register + // that we can then use for branches. + if (isFloat) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(ARM::FMSTAT))); + + unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) + .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); + FastEmitBranch(FBB, DL); + FuncInfo.MBB->addSuccessor(TBB); + return true; + } + } else if (TruncInst *TI = dyn_cast(BI->getCondition())) { + MVT SourceVT; + if (TI->hasOneUse() && TI->getParent() == I->getParent() && + (isTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { + unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri; + unsigned OpReg = getRegForValue(TI->getOperand(0)); + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TstOpc)) + .addReg(OpReg).addImm(1)); + + unsigned CCMode = ARMCC::NE; + if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { + std::swap(TBB, FBB); + CCMode = ARMCC::EQ; + } + + unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) + .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); + + FastEmitBranch(FBB, DL); + FuncInfo.MBB->addSuccessor(TBB); + return true; + } + } + + unsigned CmpReg = getRegForValue(BI->getCondition()); + if (CmpReg == 0) return false; + + // We've been divorced from our compare! Our block was split, and + // now our compare lives in a predecessor block. We musn't + // re-compare here, as the children of the compare aren't guaranteed + // live across the block boundary (we *could* check for this). + // Regardless, the compare has been done in the predecessor block, + // and it left a value for us in a virtual register. Ergo, we test + // the one-bit value left in the virtual register. + unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri; + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) + .addReg(CmpReg).addImm(1)); + + unsigned CCMode = ARMCC::NE; + if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { + std::swap(TBB, FBB); + CCMode = ARMCC::EQ; + } + unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc; BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) - .addMBB(TBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); + .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); FastEmitBranch(FBB, DL); FuncInfo.MBB->addSuccessor(TBB); - return true; + return true; } bool ARMFastISel::SelectCmp(const Instruction *I) { const CmpInst *CI = cast(I); - EVT VT; + MVT VT; const Type *Ty = CI->getOperand(0)->getType(); if (!isTypeLegal(Ty, VT)) return false; @@ -909,7 +1196,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { unsigned CmpOpc; unsigned CondReg; - switch (VT.getSimpleVT().SimpleTy) { + switch (VT.SimpleTy) { default: return false; // TODO: Verify compares. case MVT::f32: @@ -928,7 +1215,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { // Get the compare predicate. ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); - + // We may not handle every CC for now. if (ARMPred == ARMCC::AL) return false; @@ -950,10 +1237,10 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { // Now set a register based on the comparison. 
Explicitly set the predicates // here. unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi; - TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass + TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass; unsigned DestReg = createResultReg(RC); - Constant *Zero + Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); unsigned ZeroReg = TargetMaterializeConstant(Zero); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) @@ -1005,25 +1292,25 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) { bool ARMFastISel::SelectSIToFP(const Instruction *I) { // Make sure we have VFP. if (!Subtarget->hasVFP2()) return false; - - EVT DstVT; + + MVT DstVT; const Type *Ty = I->getType(); if (!isTypeLegal(Ty, DstVT)) return false; - + unsigned Op = getRegForValue(I->getOperand(0)); if (Op == 0) return false; - + // The conversion routine works on fp-reg to fp-reg and the operand above // was an integer, move it to the fp registers if possible. unsigned FP = ARMMoveToFPReg(MVT::f32, Op); if (FP == 0) return false; - + unsigned Opc; if (Ty->isFloatTy()) Opc = ARM::VSITOS; else if (Ty->isDoubleTy()) Opc = ARM::VSITOD; else return 0; - + unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) @@ -1035,43 +1322,43 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) { bool ARMFastISel::SelectFPToSI(const Instruction *I) { // Make sure we have VFP. if (!Subtarget->hasVFP2()) return false; - - EVT DstVT; + + MVT DstVT; const Type *RetTy = I->getType(); if (!isTypeLegal(RetTy, DstVT)) return false; - + unsigned Op = getRegForValue(I->getOperand(0)); if (Op == 0) return false; - + unsigned Opc; const Type *OpTy = I->getOperand(0)->getType(); if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS; else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD; else return 0; - + // f64->s32 or f32->s32 both need an intermediate f32 reg. unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) .addReg(Op)); - + // This result needs to be in an integer register, but the conversion only // takes place in fp-regs. unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); if (IntReg == 0) return false; - + UpdateValueMap(I, IntReg); return true; } bool ARMFastISel::SelectSelect(const Instruction *I) { - EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true); - if (VT == MVT::Other || !isTypeLegal(I->getType(), VT)) + MVT VT; + if (!isTypeLegal(I->getType(), VT)) return false; // Things need to be register sized for register moves. - if (VT.getSimpleVT().SimpleTy != MVT::i32) return false; + if (VT != MVT::i32) return false; const TargetRegisterClass *RC = TLI.getRegClassFor(VT); unsigned CondReg = getRegForValue(I->getOperand(0)); @@ -1094,7 +1381,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) { } bool ARMFastISel::SelectSDiv(const Instruction *I) { - EVT VT; + MVT VT; const Type *Ty = I->getType(); if (!isTypeLegal(Ty, VT)) return false; @@ -1102,8 +1389,8 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) { // If we have integer div support we should have selected this automagically. // In case we have a real miss go ahead and return false and we'll pick // it up later. - if (Subtarget->hasDivide()) return false; - + if (Subtarget->hasDivide()) return false; + // Otherwise emit a libcall. 
   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
   if (VT == MVT::i8)
@@ -1117,12 +1404,12 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) {
   else if (VT == MVT::i128)
     LC = RTLIB::SDIV_I128;
   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
- 
+
   return ARMEmitLibcall(I, LC);
 }
 
 bool ARMFastISel::SelectSRem(const Instruction *I) {
-  EVT VT;
+  MVT VT;
   const Type *Ty = I->getType();
   if (!isTypeLegal(Ty, VT))
     return false;
@@ -1138,8 +1425,8 @@ bool ARMFastISel::SelectSRem(const Instruction *I) {
     LC = RTLIB::SREM_I64;
   else if (VT == MVT::i128)
     LC = RTLIB::SREM_I128;
-  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
- 
+  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
+
   return ARMEmitLibcall(I, LC);
 }
 
@@ -1162,8 +1449,7 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
   if (Op2 == 0) return false;
 
   unsigned Opc;
-  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
-                 VT.getSimpleVT().SimpleTy == MVT::i64;
+  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
   switch (ISDOpcode) {
     default: return false;
     case ISD::FADD:
@@ -1186,6 +1472,18 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
 
 // Call Handling Code
 
+bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
+                                 EVT SrcVT, unsigned &ResultReg) {
+  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+                           Src, /*TODO: Kill=*/false);
+
+  if (RR != 0) {
+    ResultReg = RR;
+    return true;
+  } else
+    return false;
+}
+
 // This is largely taken directly from CCAssignFnForNode - we don't support
 // varargs in FastISel so that part has been removed.
 // TODO: We may not support all of this.
@@ -1193,8 +1491,12 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
   switch (CC) {
   default:
     llvm_unreachable("Unsupported calling convention");
-  case CallingConv::C:
   case CallingConv::Fast:
+    // Ignore fastcc. Silence compiler warnings.
+    (void)RetFastCC_ARM_APCS;
+    (void)FastCC_ARM_APCS;
+    // Fallthrough
+  case CallingConv::C:
     // Use target triple & subtarget features to do actual dispatch.
     if (Subtarget->isAAPCS_ABI()) {
       if (Subtarget->hasVFP2() &&
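Since the hunk is cut mid-condition here (the other operand of the &&, in the upstream file, is a hard-float ABI check), a summary of the dispatch CCAssignFnForCall performs; the table names are the real ones generated from ARMCallingConv.td:

    // Return=false picks the argument table, Return=true the result table:
    //   AAPCS ABI + VFP2 (+ hard-float)  -> CC_ARM_AAPCS_VFP / RetCC_ARM_AAPCS_VFP
    //   AAPCS ABI otherwise              -> CC_ARM_AAPCS     / RetCC_ARM_AAPCS
    //   pre-AAPCS targets (Darwin APCS)  -> CC_ARM_APCS      / RetCC_ARM_APCS

The (void) casts in the fastcc case above exist only to silence unused-function warnings for the fastcc tables; fastcc itself simply falls through and is treated as the C convention.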
@@ -1215,7 +1517,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
 
 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                   SmallVectorImpl<unsigned> &ArgRegs,
-                                  SmallVectorImpl<EVT> &ArgVTs,
+                                  SmallVectorImpl<MVT> &ArgVTs,
                                   SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                   SmallVectorImpl<unsigned> &RegArgs,
                                   CallingConv::ID CC,
@@ -1229,79 +1531,133 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
-    .addImm(NumBytes);
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                          TII.get(AdjStackDown))
+                  .addImm(NumBytes));
 
   // Process the args.
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
     unsigned Arg = ArgRegs[VA.getValNo()];
-    EVT ArgVT = ArgVTs[VA.getValNo()];
+    MVT ArgVT = ArgVTs[VA.getValNo()];
+
+    // We don't handle NEON/vector parameters yet.
+    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
+      return false;
 
     // Handle arg promotion, etc.
     switch (VA.getLocInfo()) {
       case CCValAssign::Full: break;
-      default:
-        // TODO: Handle arg promotion.
-        return false;
+      case CCValAssign::SExt: {
+        bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
+                                      Arg, ArgVT, Arg);
+        assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
+        Emitted = true;
+        ArgVT = VA.getLocVT();
+        break;
+      }
+      case CCValAssign::ZExt: {
+        bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
+                                      Arg, ArgVT, Arg);
+        assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
+        Emitted = true;
+        ArgVT = VA.getLocVT();
+        break;
+      }
+      case CCValAssign::AExt: {
+        bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
+                                      Arg, ArgVT, Arg);
+        if (!Emitted)
+          Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
+                                   Arg, ArgVT, Arg);
+        if (!Emitted)
+          Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
+                                   Arg, ArgVT, Arg);
+
+        assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
+        ArgVT = VA.getLocVT();
+        break;
+      }
+      case CCValAssign::BCvt: {
+        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
+                                 /*TODO: Kill=*/false);
+        assert(BC != 0 && "Failed to emit a bitcast!");
+        Arg = BC;
+        ArgVT = VA.getLocVT();
+        break;
+      }
+      default: llvm_unreachable("Unknown arg promotion!");
     }
 
     // Now copy/store arg to correct locations.
-    if (VA.isRegLoc()) {
+    if (VA.isRegLoc() && !VA.needsCustom()) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
               VA.getLocReg())
       .addReg(Arg);
       RegArgs.push_back(VA.getLocReg());
+    } else if (VA.needsCustom()) {
+      // TODO: We need custom lowering for vector (v2f64) args.
+      if (VA.getLocVT() != MVT::f64) return false;
+
+      CCValAssign &NextVA = ArgLocs[++i];
+
+      // TODO: Only handle register args for now.
+      if(!(VA.isRegLoc() && NextVA.isRegLoc())) return false;
+
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                              TII.get(ARM::VMOVRRD), VA.getLocReg())
+                      .addReg(NextVA.getLocReg(), RegState::Define)
+                      .addReg(Arg));
+      RegArgs.push_back(VA.getLocReg());
+      RegArgs.push_back(NextVA.getLocReg());
     } else {
-      // Need to store
-      return false;
+      assert(VA.isMemLoc());
+      // Need to store on the stack.
+      Address Addr;
+      Addr.BaseType = Address::RegBase;
+      Addr.Base.Reg = ARM::SP;
+      Addr.Offset = VA.getLocMemOffset();
+
+      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
     }
   }
-
   return true;
 }
 
-bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                              const Instruction *I, CallingConv::ID CC,
                              unsigned &NumBytes) {
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
-    .addImm(NumBytes).addImm(0);
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                          TII.get(AdjStackUp))
+                  .addImm(NumBytes).addImm(0));
 
   // Now the return value.
-  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
+  if (RetVT != MVT::isVoid) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, false, TM, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
 
     // Copy all of the result registers out of their specified physreg.
-    if (RVLocs.size() == 2 && RetVT.getSimpleVT().SimpleTy == MVT::f64) {
+    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
       // For this move we copy into two registers and then move into the
       // double fp reg we want.
-      // TODO: Are the copies necessary?
-      TargetRegisterClass *CopyRC = TLI.getRegClassFor(MVT::i32);
-      unsigned Copy1 = createResultReg(CopyRC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
-              Copy1).addReg(RVLocs[0].getLocReg());
-      UsedRegs.push_back(RVLocs[0].getLocReg());
-
-      unsigned Copy2 = createResultReg(CopyRC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
-              Copy2).addReg(RVLocs[1].getLocReg());
-      UsedRegs.push_back(RVLocs[1].getLocReg());
-
       EVT DestVT = RVLocs[0].getValVT();
       TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
       unsigned ResultReg = createResultReg(DstRC);
       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                               TII.get(ARM::VMOVDRR), ResultReg)
-                      .addReg(Copy1).addReg(Copy2));
-
-      // Finally update the result.
+                      .addReg(RVLocs[0].getLocReg())
+                      .addReg(RVLocs[1].getLocReg()));
+
+      UsedRegs.push_back(RVLocs[0].getLocReg());
+      UsedRegs.push_back(RVLocs[1].getLocReg());
+
+      // Finally update the result.
       UpdateValueMap(I, ResultReg);
     } else {
-      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
+      assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
       EVT CopyVT = RVLocs[0].getValVT();
       TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
 
@@ -1310,39 +1666,113 @@ bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                 ResultReg).addReg(RVLocs[0].getLocReg());
       UsedRegs.push_back(RVLocs[0].getLocReg());
 
-      // Finally update the result.
+      // Finally update the result.
       UpdateValueMap(I, ResultReg);
     }
   }
 
-  return true;
+  return true;
+}
+
+bool ARMFastISel::SelectRet(const Instruction *I) {
+  const ReturnInst *Ret = cast<ReturnInst>(I);
+  const Function &F = *I->getParent()->getParent();
+
+  if (!FuncInfo.CanLowerReturn)
+    return false;
+
+  if (F.isVarArg())
+    return false;
+
+  CallingConv::ID CC = F.getCallingConv();
+  if (Ret->getNumOperands() > 0) {
+    SmallVector<ISD::OutputArg, 4> Outs;
+    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+                  Outs, TLI);
+
+    // Analyze operands of the call, assigning locations to each operand.
+    SmallVector<CCValAssign, 16> ValLocs;
+    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));
+
+    const Value *RV = Ret->getOperand(0);
+    unsigned Reg = getRegForValue(RV);
+    if (Reg == 0)
+      return false;
+
+    // Only handle a single return value for now.
+    if (ValLocs.size() != 1)
+      return false;
+
+    CCValAssign &VA = ValLocs[0];
+
+    // Don't bother handling odd stuff for now.
+    if (VA.getLocInfo() != CCValAssign::Full)
+      return false;
+    // Only handle register returns for now.
+    if (!VA.isRegLoc())
+      return false;
+    // TODO: For now, don't try to handle cases where getLocInfo()
+    // says Full but the types don't match.
+    if (TLI.getValueType(RV->getType()) != VA.getValVT())
+      return false;
+
+    // Make the copy.
+    unsigned SrcReg = Reg + VA.getValNo();
+    unsigned DstReg = VA.getLocReg();
+    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+    // Avoid a cross-class copy. This is very unlikely.
+    if (!SrcRC->contains(DstReg))
+      return false;
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+            DstReg).addReg(SrcReg);
+
+    // Mark the register as live out of the function.
+    MRI.addLiveOut(VA.getLocReg());
+  }
+
+  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                          TII.get(RetOpc)));
+  return true;
+}
+
+unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
+
+  // Darwin needs the r9 versions of the opcodes.
+  bool isDarwin = Subtarget->isTargetDarwin();
+  if (isThumb) {
+    return isDarwin ? ARM::tBLr9 : ARM::tBL;
+  } else  {
+    return isDarwin ? ARM::BLr9 : ARM::BL;
+  }
 }
 
 // A quick function that will emit a call for a named libcall in F with the
 // vector of passed arguments for the Instruction in I. We can assume that we
-// can emit a call for any libcall we can produce. This is an abridged version
-// of the full call infrastructure since we won't need to worry about things
+// can emit a call for any libcall we can produce. This is an abridged version
+// of the full call infrastructure since we won't need to worry about things
 // like computed function pointers or strange arguments at call sites.
 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
 // with X86.
 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
   CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
- 
+
   // Handle *simple* calls for now.
   const Type *RetTy = I->getType();
-  EVT RetVT;
+  MVT RetVT;
   if (RetTy->isVoidTy())
     RetVT = MVT::isVoid;
   else if (!isTypeLegal(RetTy, RetVT))
     return false;
- 
-  // For now we're using BLX etc on the assumption that we have v5t ops.
-  if (!Subtarget->hasV5TOps()) return false;
- 
+
+  // TODO: For now if we have long calls specified we don't handle the call.
+  if (EnableARMLongCalls) return false;
+
   // Set up the argument vectors.
   SmallVector<Value*, 8> Args;
   SmallVector<unsigned, 8> ArgRegs;
-  SmallVector<EVT, 8> ArgVTs;
+  SmallVector<MVT, 8> ArgVTs;
   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
   Args.reserve(I->getNumOperands());
   ArgRegs.reserve(I->getNumOperands());
@@ -1352,49 +1782,53 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
     Value *Op = I->getOperand(i);
     unsigned Arg = getRegForValue(Op);
     if (Arg == 0) return false;
- 
+
     const Type *ArgTy = Op->getType();
-    EVT ArgVT;
+    MVT ArgVT;
     if (!isTypeLegal(ArgTy, ArgVT)) return false;
- 
+
     ISD::ArgFlagsTy Flags;
     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
     Flags.setOrigAlign(OriginalAlignment);
- 
+
     Args.push_back(Op);
     ArgRegs.push_back(Arg);
     ArgVTs.push_back(ArgVT);
     ArgFlags.push_back(Flags);
   }
- 
+
   // Handle the arguments now that we've gotten them.
   SmallVector<unsigned, 4> RegArgs;
   unsigned NumBytes;
   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
     return false;
- 
-  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
-  // TODO: Turn this into the table of arm call ops.
+
+  // Issue the call, BLr9 for darwin, BL otherwise.
+  // TODO: Turn this into the table of arm call ops.
   MachineInstrBuilder MIB;
-  unsigned CallOpc;
+  unsigned CallOpc = ARMSelectCallOp(NULL);
   if(isThumb)
-    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
+    // Explicitly adding the predicate here.
+    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                         TII.get(CallOpc)))
+                         .addExternalSymbol(TLI.getLibcallName(Call));
   else
-    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
-  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-    .addExternalSymbol(TLI.getLibcallName(Call));
+    // Explicitly adding the predicate here.
+    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                         TII.get(CallOpc))
+          .addExternalSymbol(TLI.getLibcallName(Call)));
 
   // Add implicit physical register uses to the call.
   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
     MIB.addReg(RegArgs[i]);
- 
+
   // Finish off the call including any return values.
-  SmallVector<unsigned, 4> UsedRegs;
+  SmallVector<unsigned, 4> UsedRegs;
   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
- 
+
   // Set all unused physreg defs as dead.
   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
- 
+
   return true;
 }
 
@@ -1405,42 +1839,38 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
 
   // Can't handle inline asm or worry about intrinsics yet.
   if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
 
-  // Only handle global variable Callees that are direct calls.
+  // Only handle global variable Callees.
   const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
-  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
+  if (!GV)
     return false;
- 
+
   // Check the calling convention.
   ImmutableCallSite CS(CI);
   CallingConv::ID CC = CS.getCallingConv();
+
   // TODO: Avoid some calling conventions?
-  if (CC != CallingConv::C) {
-    // errs() << "Can't handle calling convention: " << CC << "\n";
-    return false;
-  }
- 
+
   // Let SDISel handle vararg functions.
   const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
   const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
   if (FTy->isVarArg())
     return false;
- 
+
   // Handle *simple* calls for now.
   const Type *RetTy = I->getType();
-  EVT RetVT;
+  MVT RetVT;
   if (RetTy->isVoidTy())
     RetVT = MVT::isVoid;
   else if (!isTypeLegal(RetTy, RetVT))
     return false;
- 
-  // For now we're using BLX etc on the assumption that we have v5t ops.
-  // TODO: Maybe?
-  if (!Subtarget->hasV5TOps()) return false;
- 
+
+  // TODO: For now if we have long calls specified we don't handle the call.
+  if (EnableARMLongCalls) return false;
+
   // Set up the argument vectors.
   SmallVector<Value*, 8> Args;
   SmallVector<unsigned, 8> ArgRegs;
-  SmallVector<EVT, 8> ArgVTs;
+  SmallVector<MVT, 8> ArgVTs;
   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
   Args.reserve(CS.arg_size());
   ArgRegs.reserve(CS.arg_size());
@@ -1449,7 +1879,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
        i != e; ++i) {
     unsigned Arg = getRegForValue(*i);
- 
+
     if (Arg == 0) return false;
     ISD::ArgFlagsTy Flags;
@@ -1467,54 +1897,57 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
       return false;
 
     const Type *ArgTy = (*i)->getType();
-    EVT ArgVT;
+    MVT ArgVT;
     if (!isTypeLegal(ArgTy, ArgVT)) return false;
 
     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
     Flags.setOrigAlign(OriginalAlignment);
- 
+
     Args.push_back(*i);
     ArgRegs.push_back(Arg);
     ArgVTs.push_back(ArgVT);
     ArgFlags.push_back(Flags);
   }
- 
+
   // Handle the arguments now that we've gotten them.
   SmallVector<unsigned, 4> RegArgs;
   unsigned NumBytes;
   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
     return false;
- 
-  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
-  // TODO: Turn this into the table of arm call ops.
+
+  // Issue the call, BLr9 for darwin, BL otherwise.
+  // TODO: Turn this into the table of arm call ops.
   MachineInstrBuilder MIB;
-  unsigned CallOpc;
+  unsigned CallOpc = ARMSelectCallOp(GV);
+  // Explicitly adding the predicate here.
   if(isThumb)
-    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
+    // Explicitly adding the predicate here.
+    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                                 TII.get(CallOpc)))
+          .addGlobalAddress(GV, 0, 0);
   else
-    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
-  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-    .addGlobalAddress(GV, 0, 0);
+    // Explicitly adding the predicate here.
+    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                                 TII.get(CallOpc))
+          .addGlobalAddress(GV, 0, 0));
 
   // Add implicit physical register uses to the call.
   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
     MIB.addReg(RegArgs[i]);
- 
+
   // Finish off the call including any return values.
-  SmallVector<unsigned, 4> UsedRegs;
+  SmallVector<unsigned, 4> UsedRegs;
   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
- 
+
   // Set all unused physreg defs as dead.
   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
- 
+
   return true;
- 
+
 }
 
 // TODO: SoftFP support.
 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
-  // No Thumb-1 for now.
-  if (isThumb && !AFI->isThumb2Function()) return false;
 
   switch (I->getOpcode()) {
     case Instruction::Load:
@@ -1548,6 +1981,8 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
       return SelectCall(I);
     case Instruction::Select:
       return SelectSelect(I);
+    case Instruction::Ret:
+      return SelectRet(I);
    default: break;
  }
  return false;
@@ -1555,7 +1990,14 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
 
 namespace llvm {
   llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
-    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
+    // Completely untested on non-darwin.
+    const TargetMachine &TM = funcInfo.MF->getTarget();
+
+    // Darwin and thumb1 only for now.
+    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
+    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
+        !DisableARMFastISel)
+      return new ARMFastISel(funcInfo);
     return 0;
   }
 }
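The net effect of the flag rename at the top of the patch: ARM fast-isel flips from opt-in (-arm-fast-isel) to opt-out (-disable-arm-fast-isel), but only on the Darwin, non-Thumb1 configurations admitted by the check above. Assuming a tree with this patch applied, exercising both paths through llc should look something like:

    llc -O0 -mtriple=thumbv7-apple-darwin foo.ll
    llc -O0 -mtriple=thumbv7-apple-darwin -disable-arm-fast-isel foo.ll

The second command falls back to the ordinary SelectionDAG path; note the option is cl::Hidden, so it only shows up under -help-hidden.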