//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);
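// The selector is off by default; with this flag in place it can be
// exercised from the command line, e.g. "llc -O0 -arm-fast-isel foo.ll"
// (assuming fast-isel is enabled at -O0 for the target). Otherwise the
// existing SelectionDAG path is used unchanged.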

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const ARMFunctionInfo *AFI;

  // Convenience variable to avoid checking all the time.
  bool isThumb;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
    }

    // Code from FastISel.cpp: overrides of the FastEmitInst_* emitters, which
    // here also add the ARM predicate and optional-def operands.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);
    virtual bool ARMSelectBranch(const Instruction *I);
    virtual bool ARMSelectCmp(const Instruction *I);
    virtual bool ARMSelectFPExt(const Instruction *I);
    virtual bool ARMSelectFPTrunc(const Instruction *I);
    virtual bool ARMSelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    virtual bool ARMSelectSIToFP(const Instruction *I);
    virtual bool ARMSelectFPToSI(const Instruction *I);
    virtual bool ARMSelectSDiv(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
    bool ARMLoadAlloca(const Instruction *I, EVT VT);
    bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ARMEmitLibcall(const Instruction *I, Function *F);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable go ahead and add the predicate
// operands; if it needs default CC operands add those.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? CPSR is true iff the OptionalDef is
  // CPSR; all other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

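// For reference, a sketch of what the helpers used above append (see
// ARMBaseInstrInfo.h): AddDefaultPred adds the always-true predicate
// (ARMCC::AL plus a zero predicate-register operand), AddDefaultCC adds a
// zero-register operand standing in for the optional CCR def, and
// AddDefaultT1CC adds an explicit CPSR def for Thumb1-style flag-setting
// instructions.
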
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

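// Each emitter below follows the same pattern: if the target instruction has
// an explicit def, the result is produced directly into ResultReg; otherwise
// the instruction defines a fixed physical register (II.ImplicitDefs[0]) and
// that register is copied into ResultReg afterwards.
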
unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  // VMOVSR moves a core register into a single-precision VFP register.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  // VMOVRS moves a single-precision VFP register into a core register.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

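// These two helpers exist because the VCVT conversions used later operate
// entirely within the VFP register file, so integer sources and results have
// to be shuttled through a single-precision VFP register.
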
// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(DestReg).addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

// TODO: Verify 64-bit.
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C) {
  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci))
                    .addReg(DestReg).addConstantPoolIndex(Idx));
  else
    // The extra reg and immediate are for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp))
                    .addReg(DestReg).addConstantPoolIndex(Idx)
                    .addReg(0).addImm(0));

  return DestReg;
}

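// Note that integer constants currently always take the constant-pool load
// path above; no attempt is made yet to encode small immediates directly
// (e.g. as a mov/movw), which the SelectionDAG path would do.
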
unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  return ARMMaterializeInt(C);
}

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;

    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::Alloca: {
      assert(false && "Alloca should have been handled earlier!");
      return false;
    }
  }

  // FIXME: Handle global variables.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  // TODO: Verify the additions work, otherwise we'll need to add the
  // offset instead of 0 to the instructions and do all sorts of operand
  // munging.
  // TODO: Optimize this somewhat.
  ARMCC::CondCodes Pred = ARMCC::AL;
  unsigned PredReg = 0;

  if (!isThumb)
    emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            Reg, Reg, Offset, Pred, PredReg,
                            static_cast<const ARMBaseInstrInfo&>(TII));
  else {
    assert(AFI->isThumb2Function());
    emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           Reg, Reg, Offset, Pred, PredReg,
                           static_cast<const ARMBaseInstrInfo&>(TII));
  }

  return true;
}

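// Note: when Offset is nonzero, the emit*RegPlusImmediate helpers above fold
// it into Reg with an add/sub sequence (possibly several instructions when
// the immediate doesn't encode directly), which is why the callers can then
// hand the load/store emitters an offset that is effectively 0.
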
bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) {
  Value *Op0 = I->getOperand(0);

  // Verify it's an alloca.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

    if (SI != FuncInfo.StaticAllocaMap.end()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                               ResultReg, SI->second, RC,
                               TM.getRegisterInfo());
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }
  return false;
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Reg, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;

  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Trying to emit for an unhandled type!");
      return false;
    case MVT::i16:
      Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
      VT = MVT::i32;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
      VT = MVT::i32;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::tLDR : ARM::LDR;
      break;
  }

  ResultReg = createResultReg(TLI.getRegClassFor(VT));

  // TODO: Fix the Addressing modes so that these can share some code.
  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Reg).addImm(Offset).addReg(0));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Reg).addReg(0).addImm(Offset));

  return true;
}

bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // If we're an alloca we know we have a frame index and can emit the load
  // directly in short order.
  if (ARMLoadAlloca(I, VT))
    return true;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset.
  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT){
  Value *Op1 = I->getOperand(1);

  // Verify it's an alloca.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

    if (SI != FuncInfo.StaticAllocaMap.end()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(VT);
      assert(SrcReg != 0 && "Nothing to store!");
      TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                              SrcReg, true /*isKill*/, SI->second, RC,
                              TM.getRegisterInfo());
      return true;
    }
  }
  return false;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               unsigned DstReg, int Offset) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
    case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
    case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc), SrcReg)
                    .addReg(DstReg).addImm(Offset).addReg(0));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc), SrcReg)
                    .addReg(DstReg).addReg(0).addImm(Offset));

  return true;
}

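// Note the operand-order asymmetry above: the Thumb forms take
// (base, immediate, offset-register) while the ARM addrmode2 forms take
// (base, offset-register, immediate); the zero register or immediate fills
// the unused half of the addressing mode. The same pattern appears in
// ARMEmitLoad.
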
bool ARMFastISel::ARMSelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization.
  EVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // If we're an alloca we know we have a frame index and can emit the store
  // quickly.
  if (ARMStoreAlloca(I, SrcReg, VT))
    return true;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset.
  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;

  return true;
}

bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  // Test the condition register against 0 and branch to the true block if it
  // is non-zero; the unconditional branch handles the false block.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMSelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      CmpOpc = ARM::VCMPES;
      break;
    case MVT::f64:
      CmpOpc = ARM::VCMPED;
      break;
    case MVT::i32:
      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
      break;
  }

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // TODO: How to update the value map when there's no result reg?
  return true;
}

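// FMSTAT copies the VFP status flags (FPSCR) into CPSR, so later code that
// reads CPSR sees the result of the floating-point compare.
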
bool ARMFastISel::ARMSelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::ARMSelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isFloatTy() ||
      !V->getType()->isDoubleTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::ARMSelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(DstVT, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMSelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;
  EVT OpVT = TLI.getValueType(OpTy, true);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(OpVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::ARMSelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
                 VT.getSimpleVT().SimpleTy == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

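// For example (a sketch): on an AAPCS target built with soft float, a
// CallingConv::C call uses CC_ARM_AAPCS for its arguments and
// RetCC_ARM_AAPCS for its result; with -float-abi=hard and VFP2 the _VFP
// variants are chosen instead.
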
// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, Function *F) {
  CallingConv::ID CC = F->getCallingConv();

  // Handle *simple* calls for now.
  const Type *RetTy = F->getReturnType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  assert(!F->isVarArg() && "Vararg libcall?!");

  // Abridged from the X86 FastISel call selection mechanism.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, F->getContext());
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Process the args.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    EVT ArgVT = ArgVTs[VA.getValNo()];

    // Should we ever have to promote?
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      default:
        assert(false && "Handle arg promotion for libcalls?");
        return false;
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else {
      // Need to store.
      return false;
    }
  }

  // Issue the call, BLr9 for darwin, BL otherwise.
  MachineInstrBuilder MIB;
  unsigned CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addGlobalAddress(F, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Now the return value.
  SmallVector<unsigned, 4> UsedRegs;
  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, F->getContext());
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    EVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

    unsigned ResultReg = createResultReg(DstRC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    UsedRegs.push_back(RVLocs[0].getLocReg());

    // Finally update the result.
    UpdateValueMap(I, ResultReg);
  }

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMSelectSDiv(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If the target had integer div support we should have selected this
  // already; since it doesn't, emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  // Binary operand with all the same type.
  std::vector<const Type*> ArgTys;
  ArgTys.push_back(Ty);
  ArgTys.push_back(Ty);
  const FunctionType *FTy = FunctionType::get(Ty, ArgTys, false);
  Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
                                 TLI.getLibcallName(LC));
  if (Subtarget->isAAPCS_ABI())
    F->setCallingConv(CallingConv::ARM_AAPCS);
  else
    F->setCallingConv(I->getParent()->getParent()->getCallingConv());

  return ARMEmitLibcall(I, F);
}

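// As a sketch of the resulting call: for an i32 sdiv on an AAPCS/EABI
// target, TLI.getLibcallName(LC) typically resolves to the EABI runtime
// routine "__aeabi_idiv" (elsewhere "__divsi3"), and ARMEmitLibcall emits a
// plain BL to it with the operands placed per the calling convention.
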
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (isThumb && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
    case Instruction::Store:
      return ARMSelectStore(I);
    case Instruction::Br:
      return ARMSelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return ARMSelectCmp(I);
    case Instruction::FPExt:
      return ARMSelectFPExt(I);
    case Instruction::FPTrunc:
      return ARMSelectFPTrunc(I);
    case Instruction::SIToFP:
      return ARMSelectSIToFP(I);
    case Instruction::FPToSI:
      return ARMSelectFPToSI(I);
    case Instruction::FAdd:
      return ARMSelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return ARMSelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return ARMSelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return ARMSelectSDiv(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    if (EnableARMFastISel) return new ARMFastISel(funcInfo);
    return 0;
  }
}