//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  if (STI.isTargetMachO()) {
    if (STI.isTargetDarwin() || STI.isThumb1Only())
      return ARM::R7;
    else
      return ARM::R11;
  } else if (STI.isTargetWindows())
    return ARM::R11;
  else // ARM EABI
    return STI.isThumb() ? ARM::R7 : ARM::R11;
}

const MCPhysReg *
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  const MCPhysReg *RegList =
      STI.isTargetDarwin() ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  return RegList;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetFrameLowering *TFI = STI.getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  Reserved.set(ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    Reserved.set(getFramePointerReg(STI));
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15 && "Register list not consecutive!");
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  // Also mark every GPR pair whose sub-registers are reserved as reserved.
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        Reserved.set(*I);

  return Reserved;
}

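// Example for getReservedRegs: when R7 is used as the frame pointer and is
// therefore reserved, the pair walk above also marks the GPRPair register
// R6_R7 as reserved, so the allocator never hands out a pair that overlaps a
// reserved GPR.
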
const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetFrameLowering *TFI = STI.getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

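// For example, getPairedGPR(ARM::R4, /*Odd=*/true, this) returns ARM::R5 (the
// gsub_1 half of the R4_R5 pair), and it returns 0 for a register that is not
// part of any GPRPair.
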
// Resolve the RegPairEven / RegPairOdd register allocator hints.
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  unsigned PairedPhys = 0;
  if (VRM && VRM->hasPhys(Hint.second)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Hint.second), Odd, this);
    if (PairedPhys && MRI.isReserved(PairedPhys))
      PairedPhys = 0;
  }

  // First prefer the paired physreg.
  if (PairedPhys &&
      std::find(Order.begin(), Order.end(), PairedPhys) != Order.end())
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

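// Example: a virtual register carrying a RegPairOdd hint whose partner has
// already been assigned R4 gets R5 pushed as its first hint, followed by the
// remaining odd registers from Order whose even partners are not reserved.
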
void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of an even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the allocation hint on the
    // other register of the pair must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (MF.getSubtarget().getFrameLowering()->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign =
      MF.getSubtarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttribute(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetFrameLowering *TFI = STI.getFrameLowering();

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from the constant pool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0).addImm(Pred).addReg(PredReg)
      .setMIFlags(MIFlags);
}

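// Typical use of emitLoadConstPool: a caller that needs an i32 value which
// cannot be encoded as an instruction immediate places it in the function's
// constant pool and loads it into DestReg with an LDRcp, predicated on
// Pred/PredReg.
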
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

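// Worked example: a VLDRD (AddrMode5) whose offset operand encodes "sub, 2"
// yields getAM5Offset() == 2, which the subtract flag negates; with the word
// scale of 4 this function then returns -8 bytes.
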
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate,
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet), whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      (MI->getDesc().TSFlags & ARMII::AddrModeMask) != ARMII::AddrModeT1_s &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return true;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return true;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return false;
}

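// Worked example of the estimate above: for an ARM-mode function with an
// incoming SP-relative offset of -40, FPOffset becomes -40 - 8 - 80 = -128,
// while the SP-relative guess becomes -40 plus the local frame size plus 128;
// whichever of those the addressing mode can encode decides whether a virtual
// base register is requested.
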
/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset;  // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else
      NumBits = 12;
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale - 1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

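// For example, a Thumb2 i8/i12 frame access (AddrModeT2_i8 / AddrModeT2_i12)
// is accepted for negative offsets down to -255 and positive offsets up to
// 4095, while AddrModeT1_s is treated here as a 5-bit, word-scaled immediate,
// so only multiples of 4 up to 124 pass the check.
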
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = static_cast<const ARMFrameLowering *>(
      MF.getSubtarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
      ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow coalescing;
  // unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;