//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
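
// This flag gates the entry point at the bottom of this file: unless
// -arm-fast-isel is passed (e.g. "llc -O0 -arm-fast-isel" on an ARM triple),
// ARM::createFastISel returns null and SelectionDAG handles everything.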

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  // Convenience variable to avoid checking all the time.
  bool isThumb;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);
    virtual bool ARMSelectBranch(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
    bool ARMLoadAlloca(const Instruction *I);
    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    bool ARMMaterializeConstant(const ConstantInt *Val, unsigned &Reg);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

// #include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  DefinesOptionalPredicate sets CPSR
  // iff the optional def is the CPSR register; all other OptionalDefs in
  // ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
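
// For reference, on a predicable instruction AddDefaultPred appends the two
// standard predicate operands (ARMCC::AL plus a zero condition-code
// register), while AddDefaultCC appends a zero CCR operand ("don't set
// flags") and AddDefaultT1CC appends a CPSR def for Thumb1-style opcodes.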

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
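
// If the instruction description exposes no explicit def, the result lands
// in an implicit physical register, so the else-branch above copies
// II.ImplicitDefs[0] back into the virtual ResultReg that FastISel clients
// expect. The same pattern repeats in the _rr/_ri/_rf/_rri/_i variants below.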

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}
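
// No target instruction is needed for the extract: a COPY whose source
// operand carries a sub-register index (e.g. one of the ARM ssub/dsub
// indices) is enough, and the register allocator resolves it later.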

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  // TODO: This should be safe for fp because they're just bits from the
  // Constant.
  // TODO: Theoretically we could materialize fp constants with instructions
  // from VFP3.

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  // Different addressing modes between ARM/Thumb2 for constant pool loads.
  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addReg(0).addImm(0));

  return DestReg;
}
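
// Constants are thus materialized with a PC-relative constant-pool load
// (roughly "ldr rN, .LCPIn_m") rather than a movw/movt pair; fast-isel
// favors a simple sequence that is valid on every subtarget, if not optimal.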

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
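
// i8 and i16 are accepted even though they are not legal register types on
// ARM: ARMEmitLoad widens them by selecting the byte/halfword load opcodes
// (LDRB/LDRH or their Thumb forms), leaving an i32 in the result register.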

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  // TODO: Verify the additions work, otherwise we'll need to add the
  // offset instead of 0 to the instructions and do all sorts of operand
  // munging.
  // TODO: Optimize this somewhat.
  if (Offset != 0) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              Reg, Reg, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             Reg, Reg, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
  }
  return true;
}
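
// When a nonzero offset does show up, it is folded into Reg with an explicit
// add here so the load or store itself can be emitted against a plain
// register base; hence the "Offset /* 0 */" arguments in the callers below.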

bool ARMFastISel::ARMLoadAlloca(const Instruction *I) {
  Value *Op0 = I->getOperand(0);

  // Verify it's an alloca.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

    if (SI != FuncInfo.StaticAllocaMap.end()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
      unsigned ResultReg = createResultReg(RC);
      TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                               ResultReg, SI->second, RC,
                               TM.getRegisterInfo());
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }
  return false;
}
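
// FuncInfo.StaticAllocaMap maps each alloca in the entry block to its frame
// index, so loads from those slots bypass ARMComputeRegOffset entirely and
// go straight through loadRegFromStackSlot.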

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Reg, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;

  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Trying to emit for an unhandled type!");
      return false;
    case MVT::i16:
      Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
      VT = MVT::i32;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
      VT = MVT::i32;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::tLDR : ARM::LDR;
      break;
  }

  ResultReg = createResultReg(TLI.getRegClassFor(VT));

  // TODO: Fix the Addressing modes so that these can share some code.
  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Reg).addImm(Offset).addReg(0));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Reg).addReg(0).addImm(Offset));
  return true;
}
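
// Note the operand-order difference above: the Thumb form takes base,
// immediate, offset-register while the ARM addrmode2 form takes base,
// offset-register, immediate.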

bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg) {
  Value *Op1 = I->getOperand(1);

  // Verify it's an alloca.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

    if (SI != FuncInfo.StaticAllocaMap.end()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
      assert(SrcReg != 0 && "Nothing to store!");
      TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                              SrcReg, true /*isKill*/, SI->second, RC,
                              TM.getRegisterInfo());
      return true;
    }
  }
  return false;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               unsigned DstReg, int Offset) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
    case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
    case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(DstReg).addImm(Offset).addReg(0));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(DstReg).addReg(0).addImm(Offset));
  return true;
}
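
// i1 stores share the byte-store opcode; the value is assumed to already
// occupy the low bits of SrcReg. The f32/f64 cases are guarded on VFP2
// since VSTRS/VSTRD do not exist on pre-VFP subtargets.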

bool ARMFastISel::ARMSelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization
  EVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // If we're an alloca we know we have a frame index and can emit the store
  // quickly.
  if (ARMStoreAlloca(I, SrcReg))
    return true;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;

  return true;
}

bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
  // If we're an alloca we know we have a frame index and can emit the load
  // directly in short order.
  if (ARMLoadAlloca(I))
    return true;

  // Verify we have a legal type before going any further.
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  // The condition is an i1: compare it against zero and branch to the true
  // block if it is set.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
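
// Only the conditional edge to TBB is emitted by hand; FastEmitBranch takes
// care of the false edge, emitting an unconditional branch to FBB unless it
// is the fall-through block, and adding that successor arc itself.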

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (isThumb && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
    case Instruction::Store:
      return ARMSelectStore(I);
    case Instruction::Br:
      return ARMSelectBranch(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
    return 0;
  }
}
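
// The target is expected to reach this entry point through its
// createFastISel hook on ARMTargetLowering (in ARMISelLowering.cpp, not
// shown here), which forwards to ARM::createFastISel.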