1 //===- ARMRegisterInfo.cpp - ARM Register Information -----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the "Instituto Nokia de Tecnologia" and
6 // is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file contains the ARM implementation of the MRegisterInfo class.
13 //===----------------------------------------------------------------------===//
16 #include "ARMAddressingModes.h"
17 #include "ARMInstrInfo.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMRegisterInfo.h"
20 #include "ARMSubtarget.h"
21 #include "llvm/Constants.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLocation.h"
28 #include "llvm/Target/TargetFrameInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/ADT/BitVector.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag ("-enable-arm-reg-scavenging"); when set,
// requiresRegisterScavenging() below returns true so PEI runs the register
// scavenger for ARM functions instead of the fixed-scratch-register scheme.
38 static cl::opt<bool> EnableScavenging("enable-arm-reg-scavenging", cl::Hidden,
39 cl::desc("Enable register scavenging on ARM"));
// getRegisterNumbering - Map a TableGen register enum value onto the 4-bit
// hardware register number. GPRs and the VFP S/D registers that share an
// encoding slot return the same index (e.g. SP / S13 / D13 all map to 13).
// NOTE(review): the switch header (and any cases beyond S15/D15) are elided
// from this listing -- confirm against the full file.
41 unsigned ARMRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
44 case R0: case S0: case D0: return 0;
45 case R1: case S1: case D1: return 1;
46 case R2: case S2: case D2: return 2;
47 case R3: case S3: case D3: return 3;
48 case R4: case S4: case D4: return 4;
49 case R5: case S5: case D5: return 5;
50 case R6: case S6: case D6: return 6;
51 case R7: case S7: case D7: return 7;
52 case R8: case S8: case D8: return 8;
53 case R9: case S9: case D9: return 9;
54 case R10: case S10: case D10: return 10;
55 case R11: case S11: case D11: return 11;
56 case R12: case S12: case D12: return 12;
57 case SP: case S13: case D13: return 13;
58 case LR: case S14: case D14: return 14;
59 case PC: case S15: case D15: return 15;
// Any register not handled by a case above is an internal error.
77 assert(0 && "Unknown ARM register!");
// Construct the ARM register info, registering the call-frame pseudo opcodes
// with the generated base class. The frame pointer register is subtarget
// dependent: R7 when Thumb-style backtraces are used (Darwin), R11 otherwise.
// NOTE(review): the TII/STI member initializers (original line 85) are elided
// from this listing.
82 ARMRegisterInfo::ARMRegisterInfo(const TargetInstrInfo &tii,
83 const ARMSubtarget &sti)
84 : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
86 FramePtr(STI.useThumbBacktraces() ? ARM::R7 : ARM::R11) {
// spillCalleeSavedRegisters - Custom callee-save spilling for Thumb: emit one
// tPUSH that stores every callee-saved register, rather than a store per reg.
// For ARM mode (or an empty save list) this bails out so PEI emits default
// spill code (the early-return value is elided from this listing).
89 bool ARMRegisterInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
90 MachineBasicBlock::iterator MI,
91 const std::vector<CalleeSavedInfo> &CSI) const {
92 MachineFunction &MF = *MBB.getParent();
93 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Only Thumb functions with something to save get the tPUSH treatment.
94 if (!AFI->isThumbFunction() || CSI.empty())
97 MachineInstrBuilder MIB = BuildMI(MBB, MI, TII.get(ARM::tPUSH));
// Walk CSI backwards so operands are added in getCalleeSavedRegs() order.
98 for (unsigned i = CSI.size(); i != 0; --i) {
99 unsigned Reg = CSI[i-1].getReg();
100 // Add the callee-saved register as live-in. It's killed at the spill.
102 MIB.addReg(Reg, false/*isDef*/,false/*isImp*/,true/*isKill*/);
// restoreCalleeSavedRegisters - Thumb counterpart of the spill hook above:
// emit a single tPOP restoring all callee-saved registers. When LR is in the
// save list and the function is not vararg, the tPOP is upgraded to tPOP_RET
// so the return is folded into the restore (vararg handling elided here).
107 bool ARMRegisterInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
108 MachineBasicBlock::iterator MI,
109 const std::vector<CalleeSavedInfo> &CSI) const {
110 MachineFunction &MF = *MBB.getParent();
111 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// ARM mode / empty list: fall back to PEI's default restore code.
112 if (!AFI->isThumbFunction() || CSI.empty())
115 bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
116 MachineInstr *PopMI = new MachineInstr(TII.get(ARM::tPOP));
117 MBB.insert(MI, PopMI);
// Add restored registers as defs, in reverse order of the CSI list.
118 for (unsigned i = CSI.size(); i != 0; --i) {
119 unsigned Reg = CSI[i-1].getReg();
120 if (Reg == ARM::LR) {
121 // Special epilogue for vararg functions. See emitEpilogue
125 PopMI->setInstrDescriptor(TII.get(ARM::tPOP_RET));
// Each popped register is a definition of that register.
128 PopMI->addRegOperand(Reg, true);
// storeRegToStackSlot - Emit the target store that spills SrcReg to frame
// index FI: tSpill (Thumb) or STR (ARM) for GPRs, FSTD for double-precision
// VFP registers, FSTS for single-precision. SrcReg is marked killed.
// NOTE(review): the "else" keywords between the alternatives are elided from
// this listing.
133 void ARMRegisterInfo::
134 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
135 unsigned SrcReg, int FI,
136 const TargetRegisterClass *RC) const {
137 if (RC == ARM::GPRRegisterClass) {
138 MachineFunction &MF = *MBB.getParent();
139 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Thumb uses the tSpill pseudo; ARM uses STR with a reg+imm address.
140 if (AFI->isThumbFunction())
141 BuildMI(MBB, I, TII.get(ARM::tSpill)).addReg(SrcReg, false, false, true)
142 .addFrameIndex(FI).addImm(0);
144 BuildMI(MBB, I, TII.get(ARM::STR)).addReg(SrcReg, false, false, true)
145 .addFrameIndex(FI).addReg(0).addImm(0);
146 } else if (RC == ARM::DPRRegisterClass) {
147 BuildMI(MBB, I, TII.get(ARM::FSTD)).addReg(SrcReg, false, false, true)
148 .addFrameIndex(FI).addImm(0);
// Only remaining legal class is SPR (single-precision VFP).
150 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
151 BuildMI(MBB, I, TII.get(ARM::FSTS)).addReg(SrcReg, false, false, true)
152 .addFrameIndex(FI).addImm(0);
// loadRegFromStackSlot - Mirror of storeRegToStackSlot: reload DestReg from
// frame index FI using tRestore/LDR for GPRs, FLDD for double VFP registers,
// FLDS for single-precision VFP registers.
// NOTE(review): the "else" keywords between the alternatives are elided from
// this listing.
156 void ARMRegisterInfo::
157 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
158 unsigned DestReg, int FI,
159 const TargetRegisterClass *RC) const {
160 if (RC == ARM::GPRRegisterClass) {
161 MachineFunction &MF = *MBB.getParent();
162 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Thumb uses the tRestore pseudo; ARM uses LDR with a reg+imm address.
163 if (AFI->isThumbFunction())
164 BuildMI(MBB, I, TII.get(ARM::tRestore), DestReg)
165 .addFrameIndex(FI).addImm(0);
167 BuildMI(MBB, I, TII.get(ARM::LDR), DestReg)
168 .addFrameIndex(FI).addReg(0).addImm(0);
169 } else if (RC == ARM::DPRRegisterClass) {
170 BuildMI(MBB, I, TII.get(ARM::FLDD), DestReg)
171 .addFrameIndex(FI).addImm(0);
// Only remaining legal class is SPR (single-precision VFP).
173 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
174 BuildMI(MBB, I, TII.get(ARM::FLDS), DestReg)
175 .addFrameIndex(FI).addImm(0);
// copyRegToReg - Emit a register-to-register copy for the given class:
// tMOVrr/MOVrr for GPRs (Thumb vs ARM), FCPYS for single-precision VFP,
// FCPYD for double-precision VFP.
179 void ARMRegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
180 MachineBasicBlock::iterator I,
181 unsigned DestReg, unsigned SrcReg,
182 const TargetRegisterClass *RC) const {
183 if (RC == ARM::GPRRegisterClass) {
184 MachineFunction &MF = *MBB.getParent();
185 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Pick the Thumb or ARM move opcode based on the current function's mode.
186 BuildMI(MBB, I, TII.get(AFI->isThumbFunction() ? ARM::tMOVrr : ARM::MOVrr),
187 DestReg).addReg(SrcReg);
188 } else if (RC == ARM::SPRRegisterClass)
189 BuildMI(MBB, I, TII.get(ARM::FCPYS), DestReg).addReg(SrcReg);
190 else if (RC == ARM::DPRRegisterClass)
191 BuildMI(MBB, I, TII.get(ARM::FCPYD), DestReg).addReg(SrcReg);
196 /// isLowRegister - Returns true if the register is low register r0-r7.
/// Thumb instructions can generally only address these eight registers.
/// NOTE(review): the switch header and the return statements are elided from
/// this listing.
198 static bool isLowRegister(unsigned Reg) {
201 case R0: case R1: case R2: case R3:
202 case R4: case R5: case R6: case R7:
// foldMemoryOperand - Attempt to fold a register move (MOVrr/tMOVrr/FCPYS/
// FCPYD) into a load from or store to frame index FI: folding operand 0
// (the def) turns the move into a store of its source; folding operand 1
// turns it into a load of its destination. Returns the new instruction, or
// null when folding is not possible (e.g. Thumb high registers).
// NOTE(review): the switch-on-opcode scaffolding and some addImm operands are
// elided from this listing.
209 MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
210 unsigned OpNum, int FI) const {
211 unsigned Opc = MI->getOpcode();
212 MachineInstr *NewMI = NULL;
// --- ARM GPR move: fold into STR / LDR.
216 if (OpNum == 0) { // move -> store
217 unsigned SrcReg = MI->getOperand(1).getReg();
218 NewMI = BuildMI(TII.get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
219 .addReg(0).addImm(0);
220 } else { // move -> load
221 unsigned DstReg = MI->getOperand(0).getReg();
222 NewMI = BuildMI(TII.get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
// --- Thumb GPR move: fold into tSpill / tRestore, low registers only.
228 if (OpNum == 0) { // move -> store
229 unsigned SrcReg = MI->getOperand(1).getReg();
230 if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
231 // tSpill cannot take a high register operand.
233 NewMI = BuildMI(TII.get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
235 } else { // move -> load
236 unsigned DstReg = MI->getOperand(0).getReg();
237 if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
238 // tRestore cannot target a high register operand.
240 NewMI = BuildMI(TII.get(ARM::tRestore), DstReg).addFrameIndex(FI)
// --- Single-precision VFP copy: fold into FSTS / FLDS.
246 if (OpNum == 0) { // move -> store
247 unsigned SrcReg = MI->getOperand(1).getReg();
248 NewMI = BuildMI(TII.get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
250 } else { // move -> load
251 unsigned DstReg = MI->getOperand(0).getReg();
252 NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI).addImm(0);
// --- Double-precision VFP copy: fold into FSTD / FLDD.
257 if (OpNum == 0) { // move -> store
258 unsigned SrcReg = MI->getOperand(1).getReg();
259 NewMI = BuildMI(TII.get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
261 } else { // move -> load
262 unsigned DstReg = MI->getOperand(0).getReg();
263 NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI).addImm(0);
// Propagate kill/dead flags from the folded move onto the new memory op.
270 NewMI->copyKillDeadInfo(MI);
// getCalleeSavedRegs - Return the null-terminated list of callee-saved
// registers, most-important-first. Darwin uses a different GPR ordering
// (R7/R6/R5/R4 grouped with LR) because R7 is the Darwin frame pointer.
// NOTE(review): the terminating 0 entries of both arrays are elided from
// this listing.
274 const unsigned* ARMRegisterInfo::getCalleeSavedRegs() const {
275 static const unsigned CalleeSavedRegs[] = {
276 ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
277 ARM::R7, ARM::R6, ARM::R5, ARM::R4,
279 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
280 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
284 static const unsigned DarwinCalleeSavedRegs[] = {
285 ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
286 ARM::R11, ARM::R10, ARM::R9, ARM::R8,
288 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
289 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
292 return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
// getCalleeSavedRegClasses - Register classes matching getCalleeSavedRegs(),
// entry for entry: nine GPRs followed by eight DPRs (same shape for both the
// Darwin and non-Darwin lists, so one table serves both).
// NOTE(review): the terminating 0 entry of the array is elided from this
// listing.
295 const TargetRegisterClass* const *
296 ARMRegisterInfo::getCalleeSavedRegClasses() const {
297 static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
298 &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
299 &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
300 &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
302 &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
303 &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
306 return CalleeSavedRegClasses;
// getReservedRegs - Registers the allocator must not use: SP always; the
// frame pointer when a frame is required (or on Darwin, unconditionally);
// R9 on subtargets that reserve it; and LR when it is not already used, so
// PEI can spill it on entry if needed.
// NOTE(review): the "return Reserved;" line is elided from this listing.
309 BitVector ARMRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
310 BitVector Reserved(getNumRegs());
311 Reserved.set(ARM::SP);
312 if (STI.isTargetDarwin() || hasFP(MF))
313 Reserved.set(FramePtr);
314 // Some targets reserve R9.
315 if (STI.isR9Reserved())
316 Reserved.set(ARM::R9);
317 // At PEI time, if LR is used, it will be spilled upon entry.
318 if (MF.getUsedPhysregs() && !MF.isPhysRegUsed((unsigned)ARM::LR))
319 Reserved.set(ARM::LR);
323 /// hasFP - Return true if the specified function should have a dedicated frame
324 /// pointer register. This is true if the function has variable sized allocas
325 /// or if frame pointer elimination is disabled.
327 bool ARMRegisterInfo::hasFP(const MachineFunction &MF) const {
// -disable-fp-elim forces a frame pointer; so do variable-sized stack objects.
328 return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects();
// requiresRegisterScavenging - Controlled solely by the hidden
// -enable-arm-reg-scavenging flag declared at the top of this file.
331 bool ARMRegisterInfo::requiresRegisterScavenging() const {
332 return EnableScavenging;
335 /// emitARMRegPlusImmediate - Emits a series of instructions to materialize
336 /// a destreg = basereg + immediate in ARM code.
/// Each emitted ADDri/SUBri handles one 8-bit-rotated chunk of the immediate.
/// NOTE(review): the loop header that repeats the chunk extraction until
/// NumBytes is exhausted is elided from this listing.
338 void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
339 MachineBasicBlock::iterator &MBBI,
340 unsigned DestReg, unsigned BaseReg,
341 int NumBytes, const TargetInstrInfo &TII) {
// Work with a positive magnitude; remember the sign for ADD vs SUB.
342 bool isSub = NumBytes < 0;
343 if (isSub) NumBytes = -NumBytes;
// Extract the largest chunk encodable as an ARM shifter-operand immediate
// (an 8-bit value rotated by an even amount).
346 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
347 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
348 assert(ThisVal && "Didn't extract field correctly");
350 // We will handle these bits from offset, clear them.
351 NumBytes &= ~ThisVal;
353 // Get the properly encoded SOImmVal field.
354 int SOImmVal = ARM_AM::getSOImmVal(ThisVal);
355 assert(SOImmVal != -1 && "Bit extraction didn't work?");
357 // Build the new ADD / SUB.
358 BuildMI(MBB, MBBI, TII.get(isSub ? ARM::SUBri : ARM::ADDri), DestReg)
359 .addReg(BaseReg).addImm(SOImmVal);
364 /// calcNumMI - Returns the number of instructions required to materialize
365 /// the specific add / sub r, c instruction.
/// Bytes is the absolute immediate; NumBits/Scale describe the instruction's
/// immediate field (value range is (2^NumBits - 1) * Scale). tADDrSPi gets a
/// first chunk at its own encoding before falling back to the generic form.
/// NOTE(review): several lines of this helper (NumMIs init, the tADDrSPi
/// bookkeeping, ExtraOpc handling, final return) are elided from this listing.
366 static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
367 unsigned NumBits, unsigned Scale) {
// Largest immediate a single instruction of this form can carry.
369 unsigned Chunk = ((1 << NumBits) - 1) * Scale;
371 if (Opc == ARM::tADDrSPi) {
372 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
377 Chunk = ((1 << NumBits) - 1) * Scale;
// One instruction per full chunk, plus one for any remainder.
380 NumMIs += Bytes / Chunk;
381 if ((Bytes % Chunk) != 0)
388 /// emitLoadConstPool - Emits a load from constpool to materialize NumBytes
/// into DestReg: the constant is added to the function's constant pool (with
/// 4-byte alignment) and loaded with a Thumb pc-relative tLDRpci.
390 static void emitLoadConstPool(MachineBasicBlock &MBB,
391 MachineBasicBlock::iterator &MBBI,
392 unsigned DestReg, int NumBytes,
393 const TargetInstrInfo &TII) {
394 MachineFunction &MF = *MBB.getParent();
395 MachineConstantPool *ConstantPool = MF.getConstantPool();
396 Constant *C = ConstantInt::get(Type::Int32Ty, NumBytes);
// Alignment argument is log2: 2 => 4-byte aligned pool entry.
397 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 2);
398 BuildMI(MBB, MBBI, TII.get(ARM::tLDRpci), DestReg).addConstantPoolIndex(Idx);
401 /// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
402 /// a destreg = basereg + immediate in Thumb code. Materialize the immediate
403 /// in a register using mov / mvn sequences or load the immediate from a
/// constant pool, then add it to the base register.
/// NOTE(review): multiple lines (isSub handling, the DestReg==SP scratch
/// register selection, and several else branches) are elided from this
/// listing -- the visible tMOVrr lines save/restore R3 via R12 when R3 is
/// needed as a scratch register.
406 void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
407 MachineBasicBlock::iterator &MBBI,
408 unsigned DestReg, unsigned BaseReg,
409 int NumBytes, bool CanChangeCC,
410 const TargetInstrInfo &TII) {
// High registers rule out the plain low-register add/sub forms.
411 bool isHigh = !isLowRegister(DestReg) ||
412 (BaseReg != 0 && !isLowRegister(BaseReg));
414 // Subtract doesn't have high register version. Load the negative value
415 // if either base or dest register is a high register. Also, if do not
416 // issue sub as part of the sequence if condition register is to be
418 if (NumBytes < 0 && !isHigh && CanChangeCC) {
420 NumBytes = -NumBytes;
// By default materialize the immediate into the destination register.
422 unsigned LdReg = DestReg;
423 if (DestReg == ARM::SP) {
424 assert(BaseReg == ARM::SP && "Unexpected!");
// Preserve R3 in R12 so it can serve as the scratch register.
426 BuildMI(MBB, MBBI, TII.get(ARM::tMOVrr), ARM::R12).addReg(ARM::R3);
// Small immediates fit tMOVri8 directly; small negatives use mov + neg;
// everything else is loaded from the constant pool.
429 if (NumBytes <= 255 && NumBytes >= 0)
430 BuildMI(MBB, MBBI, TII.get(ARM::tMOVri8), LdReg).addImm(NumBytes);
431 else if (NumBytes < 0 && NumBytes >= -255) {
432 BuildMI(MBB, MBBI, TII.get(ARM::tMOVri8), LdReg).addImm(NumBytes);
433 BuildMI(MBB, MBBI, TII.get(ARM::tNEG), LdReg).addReg(LdReg);
435 emitLoadConstPool(MBB, MBBI, LdReg, NumBytes, TII);
437 // Emit add / sub.
438 int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
439 const MachineInstrBuilder MIB = BuildMI(MBB, MBBI, TII.get(Opc), DestReg);
440 if (DestReg == ARM::SP)
441 MIB.addReg(BaseReg).addReg(LdReg);
443 MIB.addReg(BaseReg).addReg(LdReg);
445 MIB.addReg(LdReg).addReg(BaseReg);
// Restore R3 from R12 if it was saved above.
446 if (DestReg == ARM::SP)
447 BuildMI(MBB, MBBI, TII.get(ARM::tMOVrr), ARM::R3).addReg(ARM::R12);
450 /// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
451 /// a destreg = basereg + immediate in Thumb code.
/// Picks the best add/sub encoding (tADDspi/tSUBspi, tADDrSPi, tADDi8/tSUBi8,
/// tADDi3/tSUBi3) and falls back to emitThumbRegPlusImmInReg when chunking
/// would take too many instructions.
/// NOTE(review): many scaffolding lines (Scale/Opc/ExtraOpc declarations,
/// several branch bodies, the chunking loop header) are elided from this
/// listing.
453 void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
454 MachineBasicBlock::iterator &MBBI,
455 unsigned DestReg, unsigned BaseReg,
456 int NumBytes, const TargetInstrInfo &TII) {
457 bool isSub = NumBytes < 0;
458 unsigned Bytes = (unsigned)NumBytes;
459 if (isSub) Bytes = -NumBytes;
460 bool isMul4 = (Bytes & 3) == 0;
461 bool isTwoAddr = false;
462 bool DstNotEqBase = false;
463 unsigned NumBits = 1;
// Case 1: sp = sp +/- imm uses the dedicated SP-adjust encodings.
468 if (DestReg == BaseReg && BaseReg == ARM::SP) {
469 assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
472 Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
// Case 2: reg = sp + imm uses tADDrSPi, with tADDi3 to mop up remainder.
474 } else if (!isSub && BaseReg == ARM::SP) {
477 // r1 = add sp, 100 * 4
481 ExtraOpc = ARM::tADDi3;
// Case 3: generic reg +/- imm; two-address tADDi8/tSUBi8 form.
490 if (DestReg != BaseReg)
493 Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
// If chunking would exceed the threshold, materialize the immediate in a
// register instead of emitting a long add/sub sequence.
497 unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
498 unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
499 if (NumMIs > Threshold) {
500 // This will expand into too many instructions. Load the immediate from a
502 emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII);
// When dest must differ from base, seed DestReg from BaseReg first.
507 if (isLowRegister(DestReg) && isLowRegister(BaseReg)) {
508 // If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
509 unsigned Chunk = (1 << 3) - 1;
510 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
512 BuildMI(MBB, MBBI, TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3), DestReg)
513 .addReg(BaseReg).addImm(ThisVal);
515 BuildMI(MBB, MBBI, TII.get(ARM::tMOVrr), DestReg).addReg(BaseReg);
// Emit one add/sub per chunk of the remaining immediate.
520 unsigned Chunk = ((1 << NumBits) - 1) * Scale;
522 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
525 // Build the new tADD / tSUB.
527 BuildMI(MBB, MBBI, TII.get(Opc), DestReg).addReg(DestReg).addImm(ThisVal);
529 BuildMI(MBB, MBBI, TII.get(Opc), DestReg).addReg(BaseReg).addImm(ThisVal);
// After the first tADDrSPi, later chunks must use the two-address i8 form.
532 if (Opc == ARM::tADDrSPi) {
538 Chunk = ((1 << NumBits) - 1) * Scale;
539 Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
// Finally apply the sub-word residue (0-3 bytes) with the extra opcode.
546 BuildMI(MBB, MBBI, TII.get(ExtraOpc), DestReg).addReg(DestReg)
547 .addImm(((unsigned)NumBytes) & 3);
// emitSPUpdate - Adjust SP by NumBytes, dispatching to the Thumb or ARM
// reg-plus-immediate emitter depending on the function's mode.
// NOTE(review): the if/else around the two calls is elided from this listing.
551 void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
552 int NumBytes, bool isThumb, const TargetInstrInfo &TII) {
554 emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
556 emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
// eliminateCallFramePseudoInstr - Replace the ADJCALLSTACKDOWN/UP pseudos
// with real SP adjustments when call frames are not simplified away (i.e.
// the function has dynamic allocas). The pseudo itself is erased afterwards
// (erase call elided from this listing, along with the hasFP guard).
559 void ARMRegisterInfo::
560 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
561 MachineBasicBlock::iterator I) const {
563 // If we have alloca, convert as follows:
564 // ADJCALLSTACKDOWN -> sub, sp, sp, amount
565 // ADJCALLSTACKUP -> add, sp, sp, amount
566 MachineInstr *Old = I;
567 unsigned Amount = Old->getOperand(0).getImmedValue();
569 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
570 // We need to keep the stack aligned properly. To do this, we round the
571 // amount of space needed for the outgoing arguments up to the next
572 // alignment boundary.
573 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
574 Amount = (Amount+Align-1)/Align*Align;
576 // Replace the pseudo instruction with a new instruction...
577 if (Old->getOpcode() == ARM::ADJCALLSTACKDOWN) {
578 emitSPUpdate(MBB, I, -Amount, AFI->isThumbFunction(), TII);
// Stack-up restores the space reserved by the matching stack-down.
580 assert(Old->getOpcode() == ARM::ADJCALLSTACKUP);
581 emitSPUpdate(MBB, I, Amount, AFI->isThumbFunction(), TII);
588 /// emitThumbConstant - Emit a series of instructions to materialize a
/// constant into DestReg: mov of the low 8 bits, adds for the remainder,
/// and a final neg when the constant was negative.
/// NOTE(review): guard conditions around the add and neg steps are elided
/// from this listing.
590 static void emitThumbConstant(MachineBasicBlock &MBB,
591 MachineBasicBlock::iterator &MBBI,
592 unsigned DestReg, int Imm,
593 const TargetInstrInfo &TII) {
// Work with the magnitude; negate at the end if needed.
594 bool isSub = Imm < 0;
595 if (isSub) Imm = -Imm;
// tMOVri8 can carry at most 255; the rest is added afterwards.
597 int Chunk = (1 << 8) - 1;
598 int ThisVal = (Imm > Chunk) ? Chunk : Imm;
600 BuildMI(MBB, MBBI, TII.get(ARM::tMOVri8), DestReg).addImm(ThisVal);
602 emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII);
604 BuildMI(MBB, MBBI, TII.get(ARM::tNEG), DestReg).addReg(DestReg);
// eliminateFrameIndex - Rewrite the abstract frame-index operand of MI into a
// concrete base register (SP or FP) plus immediate offset, emitting extra
// instructions when the offset does not fit the instruction's encoding.
// NOTE(review): this is a heavily elided listing of a long, order-sensitive
// function -- declarations (i, Scale, ImmIdx, InstrOffs, isSub), switch
// headers, break/return statements and several else branches are missing.
// Treat all structural comments below as referring only to the visible lines.
607 void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II) const{
609 MachineInstr &MI = *II;
610 MachineBasicBlock &MBB = *MI.getParent();
611 MachineFunction &MF = *MBB.getParent();
612 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
613 bool isThumb = AFI->isThumbFunction();
// Locate the frame-index operand within MI.
615 while (!MI.getOperand(i).isFrameIndex()) {
617 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
// Compute the offset of the object relative to the incoming SP...
620 unsigned FrameReg = ARM::SP;
621 int FrameIndex = MI.getOperand(i).getFrameIndex();
622 int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
623 MF.getFrameInfo()->getStackSize();
// ...then rebase callee-saved-area slots onto their area's start, or onto
// the frame pointer when the function has dynamic allocas.
625 if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
626 Offset -= AFI->getGPRCalleeSavedArea1Offset();
627 else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
628 Offset -= AFI->getGPRCalleeSavedArea2Offset();
629 else if (AFI->isDPRCalleeSavedAreaFrame(FrameIndex))
630 Offset -= AFI->getDPRCalleeSavedAreaOffset();
631 else if (hasFP(MF)) {
632 // There is alloca()'s in this function, must reference off the frame
634 FrameReg = getFrameRegister(MF);
635 Offset -= AFI->getFramePtrSpillOffset();
638 unsigned Opcode = MI.getOpcode();
639 const TargetInstrDescriptor &Desc = TII.get(Opcode);
640 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
// --- ARM ADDri (frame address computation).
643 if (Opcode == ARM::ADDri) {
644 Offset += MI.getOperand(i+1).getImm();
// Zero offset: degenerate to a plain register move.
646 // Turn it into a move.
647 MI.setInstrDescriptor(TII.get(ARM::MOVrr));
648 MI.getOperand(i).ChangeToRegister(FrameReg, false);
649 MI.RemoveOperand(i+1);
// Negative offset: flip to SUBri and negate (negation elided).
651 } else if (Offset < 0) {
654 MI.setInstrDescriptor(TII.get(ARM::SUBri));
657 // Common case: small offset, fits into instruction.
658 int ImmedOffset = ARM_AM::getSOImmVal(Offset);
659 if (ImmedOffset != -1) {
660 // Replace the FrameIndex with sp / fp
661 MI.getOperand(i).ChangeToRegister(FrameReg, false);
662 MI.getOperand(i+1).ChangeToImmediate(ImmedOffset);
666 // Otherwise, we fallback to common code below to form the imm offset with
667 // a sequence of ADDri instructions. First though, pull as much of the imm
668 // into this ADDri as possible.
669 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
670 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, (32-RotAmt) & 31);
672 // We will handle these bits from offset, clear them.
673 Offset &= ~ThisImmVal;
675 // Get the properly encoded SOImmVal field.
676 int ThisSOImmVal = ARM_AM::getSOImmVal(ThisImmVal);
677 assert(ThisSOImmVal != -1 && "Bit extraction didn't work?");
678 MI.getOperand(i+1).ChangeToImmediate(ThisSOImmVal);
// --- Thumb tADDrSPi (frame address computation).
679 } else if (Opcode == ARM::tADDrSPi) {
680 Offset += MI.getOperand(i+1).getImm();
681 assert((Offset & 3) == 0 &&
682 "Thumb add/sub sp, #imm immediate must be multiple of 4!");
// Zero offset: degenerate to a plain Thumb register move.
684 // Turn it into a move.
685 MI.setInstrDescriptor(TII.get(ARM::tMOVrr));
686 MI.getOperand(i).ChangeToRegister(FrameReg, false);
687 MI.RemoveOperand(i+1);
691 // Common case: small offset, fits into instruction.
692 if (((Offset >> 2) & ~255U) == 0) {
693 // Replace the FrameIndex with sp / fp
694 MI.getOperand(i).ChangeToRegister(FrameReg, false);
695 MI.getOperand(i+1).ChangeToImmediate(Offset >> 2);
// Offset too large: decide between full expansion and partial folding.
699 unsigned DestReg = MI.getOperand(0).getReg();
700 unsigned Bytes = (Offset > 0) ? Offset : -Offset;
701 unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, 8, 1);
702 // MI would expand into a large number of instructions. Don't try to
703 // simplify the immediate.
705 emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII);
711 // Translate r0 = add sp, imm to
712 // r0 = add sp, 255*4
713 // r0 = add r0, (imm - 255*4)
714 MI.getOperand(i).ChangeToRegister(FrameReg, false);
715 MI.getOperand(i+1).ChangeToImmediate(255);
716 Offset = (Offset - 255 * 4);
717 MachineBasicBlock::iterator NII = next(II);
718 emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII);
720 // Translate r0 = add sp, -imm to
721 // r0 = -imm (this is then translated into a series of instructons)
// r0 = add r0, sp (via tADDhirr, which tolerates the high SP register).
723 emitThumbConstant(MBB, II, DestReg, Offset, TII);
724 MI.setInstrDescriptor(TII.get(ARM::tADDhirr));
725 MI.getOperand(i).ChangeToRegister(DestReg, false);
726 MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
// --- Loads/stores: decode the immediate field per addressing mode.
732 unsigned NumBits = 0;
735 case ARMII::AddrMode2: {
737 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
738 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
743 case ARMII::AddrMode3: {
745 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
746 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
751 case ARMII::AddrMode5: {
753 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
754 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
// Thumb SP/FP-relative loads/stores: 8-bit imm off SP, 5-bit off a register.
760 case ARMII::AddrModeTs: {
762 InstrOffs = MI.getOperand(ImmIdx).getImm();
763 NumBits = (FrameReg == ARM::SP) ? 8 : 5;
768 assert(0 && "Unsupported addressing mode!");
// Fold the instruction's pre-existing immediate into the computed offset.
773 Offset += InstrOffs * Scale;
774 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
775 if (Offset < 0 && !isThumb) {
780 // Common case: small offset, fits into instruction.
781 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
782 int ImmedOffset = Offset / Scale;
783 unsigned Mask = (1 << NumBits) - 1;
784 if ((unsigned)Offset <= Mask * Scale) {
785 // Replace the FrameIndex with sp
786 MI.getOperand(i).ChangeToRegister(FrameReg, false);
// For ARM modes the add/sub bit lives above the immediate field.
788 ImmedOffset |= 1 << NumBits;
789 ImmOp.ChangeToImmediate(ImmedOffset);
793 bool isThumSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
794 if (AddrMode == ARMII::AddrModeTs) {
795 // Thumb tLDRspi, tSTRspi. These will change to instructions that use
796 // a different base register.
798 Mask = (1 << NumBits) - 1;
800 // If this is a thumb spill / restore, we will be using a constpool load to
801 // materialize the offset.
802 if (AddrMode == ARMII::AddrModeTs && isThumSpillRestore)
803 ImmOp.ChangeToImmediate(0);
805 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
806 ImmedOffset = ImmedOffset & Mask;
808 ImmedOffset |= 1 << NumBits;
809 ImmOp.ChangeToImmediate(ImmedOffset);
810 Offset &= ~(Mask*Scale);
814 // If we get here, the immediate doesn't fit into the instruction. We folded
815 // as much as possible above, handle the rest, providing a register that is
817 assert(Offset && "This code isn't needed if offset already handled!");
// Thumb path: rewrite to [reg, reg] addressing with a scratch base register.
820 if (TII.isLoad(Opcode)) {
821 // Use the destination register to materialize sp + offset.
822 unsigned TmpReg = MI.getOperand(0).getReg();
824 if (Opcode == ARM::tRestore) {
825 if (FrameReg == ARM::SP)
826 emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
828 emitLoadConstPool(MBB, II, TmpReg, Offset, TII);
832 emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
833 MI.setInstrDescriptor(TII.get(ARM::tLDR));
834 MI.getOperand(i).ChangeToRegister(TmpReg, false);
836 MI.addRegOperand(FrameReg, false); // Use [reg, reg] addrmode.
838 MI.addRegOperand(0, false); // tLDR has an extra register operand.
839 } else if (TII.isStore(Opcode)) {
840 // FIXME! This is horrific!!! We need register scavenging.
841 // Our temporary workaround has marked r3 unavailable. Of course, r3 is
842 // also a ABI register so it's possible that is is the register that is
843 // being storing here. If that's the case, we do the following:
845 // Use r2 to materialize sp + offset
848 unsigned ValReg = MI.getOperand(0).getReg();
849 unsigned TmpReg = ARM::R3;
// Save whichever register we are about to clobber into R12.
851 if (ValReg == ARM::R3) {
852 BuildMI(MBB, II, TII.get(ARM::tMOVrr), ARM::R12).addReg(ARM::R2);
855 if (TmpReg == ARM::R3 && AFI->isR3IsLiveIn())
856 BuildMI(MBB, II, TII.get(ARM::tMOVrr), ARM::R12).addReg(ARM::R3);
857 if (Opcode == ARM::tSpill) {
858 if (FrameReg == ARM::SP)
859 emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
861 emitLoadConstPool(MBB, II, TmpReg, Offset, TII);
865 emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
866 MI.setInstrDescriptor(TII.get(ARM::tSTR));
867 MI.getOperand(i).ChangeToRegister(TmpReg, false);
869 MI.addRegOperand(FrameReg, false); // Use [reg, reg] addrmode.
871 MI.addRegOperand(0, false); // tSTR has an extra register operand.
// Restore the saved register(s) from R12 after the store.
873 MachineBasicBlock::iterator NII = next(II);
874 if (ValReg == ARM::R3)
875 BuildMI(MBB, NII, TII.get(ARM::tMOVrr), ARM::R2).addReg(ARM::R12);
876 if (TmpReg == ARM::R3 && AFI->isR3IsLiveIn())
877 BuildMI(MBB, NII, TII.get(ARM::tMOVrr), ARM::R3).addReg(ARM::R12);
879 assert(false && "Unexpected opcode!");
// ARM path: materialize the full address in R12 and use it as the base.
881 // Insert a set of r12 with the full address: r12 = sp + offset
882 // If the offset we have is too large to fit into the instruction, we need
883 // to form it with a series of ADDri's. Do this by taking 8-bit chunks
885 emitARMRegPlusImmediate(MBB, II, ARM::R12, FrameReg,
886 isSub ? -Offset : Offset, TII);
887 MI.getOperand(i).ChangeToRegister(ARM::R12, false);
// processFunctionBeforeCalleeSavedScan - Pre-PEI hook: decide which extra
// registers (FP, LR, padding GPRs) must be marked used so they get spilled,
// based on which callee-saved registers the function actually touches.
// NOTE(review): this listing elides large parts of the per-register
// classification (Darwin area-2 split, the tracking of R4-R7/LR spills) and
// several closing braces; comments below cover only the visible lines.
891 void ARMRegisterInfo::
892 processFunctionBeforeCalleeSavedScan(MachineFunction &MF) const {
893 // This tells PEI to spill the FP as if it is any other callee-save register
894 // to take advantage the eliminateFrameIndex machinery. This also ensures it
895 // is spilled in the order specified by getCalleeSavedRegs() to make it easier
896 // to combine multiple loads / stores.
897 bool CanEliminateFrame = true;
898 bool CS1Spilled = false;
899 bool LRSpilled = false;
900 unsigned NumGPRSpills = 0;
901 SmallVector<unsigned, 4> UnspilledCS1GPRs;
902 SmallVector<unsigned, 4> UnspilledCS2GPRs;
904 // Don't spill FP if the frame can be eliminated. This is determined
905 // by scanning the callee-save registers to see if any is used.
906 const unsigned *CSRegs = getCalleeSavedRegs();
907 const TargetRegisterClass* const *CSRegClasses = getCalleeSavedRegClasses();
908 for (unsigned i = 0; CSRegs[i]; ++i) {
909 unsigned Reg = CSRegs[i];
910 bool Spilled = false;
911 if (MF.isPhysRegUsed(Reg)) {
913 CanEliminateFrame = false;
915 // Check alias registers too.
916 for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
917 if (MF.isPhysRegUsed(*Aliases)) {
919 CanEliminateFrame = false;
// Classify spilled/unspilled GPRs into save area 1 vs area 2 lists.
924 if (CSRegClasses[i] == &ARM::GPRRegClass) {
928 if (!STI.isTargetDarwin()) {
936 // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
951 if (!STI.isTargetDarwin()) {
952 UnspilledCS1GPRs.push_back(Reg);
962 UnspilledCS1GPRs.push_back(Reg);
965 UnspilledCS2GPRs.push_back(Reg);
972 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
973 bool ForceLRSpill = false;
974 if (!LRSpilled && AFI->isThumbFunction()) {
975 unsigned FnSize = ARM::GetFunctionSize(MF);
976 // Force LR spill if the Thumb function size is > 2048. This enables the
977 // use of BL to implement far jump. If it turns out that it's not needed
978 // the branch fix up path will undo it.
979 if (FnSize >= (1 << 11)) {
980 CanEliminateFrame = false;
985 if (!CanEliminateFrame || hasFP(MF)) {
986 AFI->setHasStackFrame(true);
988 // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
989 // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
990 if (!LRSpilled && CS1Spilled) {
991 MF.changePhyRegUsed(ARM::LR, true);
// LR now counts as a real spill; stop treating it as unspilled or forced.
993 UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
994 UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
995 ForceLRSpill = false;
998 // Darwin ABI requires FP to point to the stack slot that contains the
1000 if (STI.isTargetDarwin() || hasFP(MF)) {
1001 MF.changePhyRegUsed(FramePtr, true);
1005 // If stack and double are 8-byte aligned and we are spilling an odd number
1006 // of GPRs. Spill one extra callee save GPR so we won't have to pad between
1007 // the integer and double callee save areas.
1008 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1009 if (TargetAlign == 8 && (NumGPRSpills & 1)) {
1010 if (CS1Spilled && !UnspilledCS1GPRs.empty())
1011 MF.changePhyRegUsed(UnspilledCS1GPRs.front(), true);
1012 else if (!UnspilledCS2GPRs.empty())
1013 MF.changePhyRegUsed(UnspilledCS2GPRs.front(), true);
// Honor the far-jump-driven forced LR spill computed above.
1018 MF.changePhyRegUsed(ARM::LR, true);
1019 AFI->setLRIsForceSpilled(true);
1023 /// Move iterator pass the next bunch of callee save load / store ops for
1024 /// the particular spill area (1: integer area 1, 2: integer area 2,
1025 /// 3: fp area, 0: don't care).
/// NOTE(review): the iterator increment, Category assignments for area 3,
/// break statements and the Done flag handling are elided from this listing.
1026 static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1027 MachineBasicBlock::iterator &MBBI,
1028 int Opc, unsigned Area,
1029 const ARMSubtarget &STI) {
// Walk consecutive frame-index loads/stores of the requested opcode.
1030 while (MBBI != MBB.end() &&
1031 MBBI->getOpcode() == Opc && MBBI->getOperand(1).isFrameIndex()) {
1034 unsigned Category = 0;
// Classify each instruction by which callee-save area its register is in.
1035 switch (MBBI->getOperand(0).getReg()) {
1036 case ARM::R4: case ARM::R5: case ARM::R6: case ARM::R7:
// R8-R11 belong to area 2 on Darwin, area 1 otherwise (matches the
// getCalleeSavedRegs() ordering above).
1040 case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
1041 Category = STI.isTargetDarwin() ? 2 : 1;
1043 case ARM::D8: case ARM::D9: case ARM::D10: case ARM::D11:
1044 case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
// Stop as soon as an instruction outside the requested area is seen.
1051 if (Done || Category != Area)
// Emit the function prologue into the entry block: round the frame size up,
// carve out up to three callee-saved spill areas (GPR area 1, GPR area 2 —
// Darwin only — and the DPR area), establish the frame pointer when needed,
// and finally allocate the remaining local stack space.
1059 void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
1060 MachineBasicBlock &MBB = MF.front();
1061 MachineBasicBlock::iterator MBBI = MBB.begin();
1062 MachineFrameInfo *MFI = MF.getFrameInfo();
1063 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1064 bool isThumb = AFI->isThumbFunction();
1065 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1066 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
1067 unsigned NumBytes = MFI->getStackSize();
1068 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
1071 // Check if R3 is live in. It might have to be used as a scratch register.
1072 for (MachineFunction::livein_iterator I=MF.livein_begin(),E=MF.livein_end();
1074 if ((*I).first == ARM::R3) {
1075 AFI->setR3IsLiveIn(true);
1080 // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
// Round the frame size up to a multiple of 4 and write it back so all
// later offset computations see the aligned size.
1081 NumBytes = (NumBytes + 3) & ~3;
1082 MFI->setStackSize(NumBytes);
1085 // Determine the sizes of each callee-save spill areas and record which frame
1086 // belongs to which callee-save spill areas.
1087 unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
1088 int FramePtrSpillFI = 0;
// No callee-saved spills at all: just drop SP by the frame size and finish.
1089 if (!AFI->hasStackFrame()) {
1091 emitSPUpdate(MBB, MBBI, -NumBytes, isThumb, TII);
// Allocate the varargs register-save area before the CS spill areas.
1096 emitSPUpdate(MBB, MBBI, -VARegSaveSize, isThumb, TII);
// Walk the callee-saved registers and assign each spill slot to an area,
// remembering the slot that holds the frame pointer.
1098 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1099 unsigned Reg = CSI[i].getReg();
1100 int FI = CSI[i].getFrameIdx();
1107 if (Reg == FramePtr)
1108 FramePtrSpillFI = FI;
1109 AFI->addGPRCalleeSavedArea1Frame(FI);
1116 if (Reg == FramePtr)
1117 FramePtrSpillFI = FI;
// Darwin splits the high GPRs into a second spill area; other targets
// keep everything in area 1.
1118 if (STI.isTargetDarwin()) {
1119 AFI->addGPRCalleeSavedArea2Frame(FI);
1122 AFI->addGPRCalleeSavedArea1Frame(FI);
1127 AFI->addDPRCalleeSavedAreaFrame(FI);
1132 if (Align == 8 && (GPRCS1Size & 7) != 0)
1133 // Pad CS1 to ensure proper alignment.
1137 // Build the new SUBri to adjust SP for integer callee-save spill area 1.
1138 emitSPUpdate(MBB, MBBI, -GPRCS1Size, isThumb, TII);
1139 movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 1, STI);
1140 } else if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH)
1143 // Darwin ABI requires FP to point to the stack slot that contains the
// Point FP at the slot where the old frame pointer was just spilled.
1145 if (STI.isTargetDarwin() || hasFP(MF))
1146 BuildMI(MBB, MBBI, TII.get(isThumb ? ARM::tADDrSPi : ARM::ADDri), FramePtr)
1147 .addFrameIndex(FramePtrSpillFI).addImm(0);
1150 // Build the new SUBri to adjust SP for integer callee-save spill area 2.
1151 emitSPUpdate(MBB, MBBI, -GPRCS2Size, false, TII);
1153 // Build the new SUBri to adjust SP for FP callee-save spill area.
1154 movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 2, STI);
1155 emitSPUpdate(MBB, MBBI, -DPRCSSize, false, TII);
1158 // Determine starting offsets of spill areas.
1159 unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
1160 unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
1161 unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
// Record area offsets/size on ARMFunctionInfo so frame-index elimination
// and the epilogue can reconstruct the layout.
1162 AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
1163 AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
1164 AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
1165 AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
// Remaining bytes are the local variable area below the DPR spills.
1167 NumBytes = DPRCSOffset;
1169 // Insert it after all the callee-save spills.
1171 movePastCSLoadStoreOps(MBB, MBBI, ARM::FSTD, 3, STI);
1172 emitSPUpdate(MBB, MBBI, -NumBytes, isThumb, TII);
1175 AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
1176 AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
1177 AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
// Returns whether Reg appears in the zero-terminated callee-saved register
// list CSRegs (as produced by getCalleeSavedRegs()).
1180 static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
1181 for (unsigned i = 0; CSRegs[i]; ++i)
1182 if (Reg == CSRegs[i])
// Returns whether MI is a callee-saved register restore: a frame-index load
// (FLDD / LDR / tRestore) whose destination is one of the registers in the
// zero-terminated CSRegs list.
1187 static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
1188 return ((MI->getOpcode() == ARM::FLDD ||
1189 MI->getOpcode() == ARM::LDR ||
1190 MI->getOpcode() == ARM::tRestore) &&
1191 MI->getOperand(1).isFrameIndex() &&
1192 isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
// Emit the function epilogue into a returning block: undo the prologue's
// stack adjustments in reverse order (locals, DPR area, GPR area 2, GPR
// area 1, varargs save area), restoring SP either arithmetically or from
// the frame pointer.
1195 void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
1196 MachineBasicBlock &MBB) const {
// The epilogue is inserted immediately before the block's return instr.
1197 MachineBasicBlock::iterator MBBI = prior(MBB.end());
1198 assert((MBBI->getOpcode() == ARM::BX_RET ||
1199 MBBI->getOpcode() == ARM::tBX_RET ||
1200 MBBI->getOpcode() == ARM::tPOP_RET) &&
1201 "Can only insert epilog into returning blocks");
1203 MachineFrameInfo *MFI = MF.getFrameInfo();
1204 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1205 bool isThumb = AFI->isThumbFunction();
1206 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1207 int NumBytes = (int)MFI->getStackSize();
// No callee-saved spills: a single SP bump undoes the whole frame.
1208 if (!AFI->hasStackFrame()) {
1210 emitSPUpdate(MBB, MBBI, NumBytes, isThumb, TII);
1214 // Unwind MBBI to point to first LDR / FLDD.
1215 const unsigned *CSRegs = getCalleeSavedRegs();
1216 if (MBBI != MBB.begin()) {
1219 while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
1220 if (!isCSRestore(MBBI, CSRegs))
1224 // Move SP to start of FP callee save spill area.
1225 NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
1226 AFI->getGPRCalleeSavedArea2Size() +
1227 AFI->getDPRCalleeSavedAreaSize());
// Thumb path: recompute SP relative to the frame pointer spill slot.
1230 NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
1231 // Reset SP based on frame pointer only if the stack frame extends beyond
1232 // frame pointer stack slot or target is ELF and the function has FP.
1234 emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes, TII);
1236 BuildMI(MBB, MBBI, TII.get(ARM::tMOVrr), ARM::SP).addReg(FramePtr);
// If a tPOP already precedes the return, place the SP update before it so
// the pop still reads the restored slots.
1238 if (MBBI->getOpcode() == ARM::tBX_RET &&
1239 &MBB.front() != MBBI &&
1240 prior(MBBI)->getOpcode() == ARM::tPOP) {
1241 MachineBasicBlock::iterator PMBBI = prior(MBBI);
1242 emitSPUpdate(MBB, PMBBI, NumBytes, isThumb, TII);
1244 emitSPUpdate(MBB, MBBI, NumBytes, isThumb, TII);
1247 // Darwin ABI requires FP to point to the stack slot that contains the
// ARM (non-Thumb) path with a frame pointer.
1249 if (STI.isTargetDarwin() || hasFP(MF)) {
1250 NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
1251 // Reset SP based on frame pointer only if the stack frame extends beyond
1252 // frame pointer stack slot or target is ELF and the function has FP.
1253 if (AFI->getGPRCalleeSavedArea2Size() ||
1254 AFI->getDPRCalleeSavedAreaSize() ||
1255 AFI->getDPRCalleeSavedAreaOffset()||
1258 BuildMI(MBB, MBBI, TII.get(ARM::SUBri), ARM::SP).addReg(FramePtr)
1261 BuildMI(MBB, MBBI, TII.get(ARM::MOVrr), ARM::SP).addReg(FramePtr);
1262 } else if (NumBytes) {
1263 emitSPUpdate(MBB, MBBI, NumBytes, false, TII);
1266 // Move SP to start of integer callee save spill area 2.
1267 movePastCSLoadStoreOps(MBB, MBBI, ARM::FLDD, 3, STI);
1268 emitSPUpdate(MBB, MBBI, AFI->getDPRCalleeSavedAreaSize(), false, TII);
1270 // Move SP to start of integer callee save spill area 1.
1271 movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 2, STI);
1272 emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea2Size(), false, TII);
1274 // Move SP to SP upon entry to the function.
1275 movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 1, STI);
1276 emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea1Size(), false, TII);
// Finally undo the varargs register-save area set up by the prologue.
1279 if (VARegSaveSize) {
1281 // Epilogue for vararg functions: pop LR to R3 and branch off it.
1282 // FIXME: Verify this is still ok when R3 is no longer being reserved.
1283 BuildMI(MBB, MBBI, TII.get(ARM::tPOP)).addReg(ARM::R3);
1285 emitSPUpdate(MBB, MBBI, VARegSaveSize, isThumb, TII);
1288 BuildMI(MBB, MBBI, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);
// getRARegister - Return the register used to hold the return address.
// NOTE(review): the returned value is presumably ARM::LR — confirm against
// the return statement in the full source.
1294 unsigned ARMRegisterInfo::getRARegister() const {
// getFrameRegister - Return the register used as the frame pointer:
// R7 when Thumb-style backtraces are in use, R11 otherwise.
1298 unsigned ARMRegisterInfo::getFrameRegister(MachineFunction &MF) const {
1299 return STI.useThumbBacktraces() ? ARM::R7 : ARM::R11;
// getEHExceptionRegister - Not yet implemented for ARM; aborts at runtime
// if ever called.
1302 unsigned ARMRegisterInfo::getEHExceptionRegister() const {
1303 assert(0 && "What is the exception register");
// getEHHandlerRegister - Not yet implemented for ARM; aborts at runtime
// if ever called.
1307 unsigned ARMRegisterInfo::getEHHandlerRegister() const {
1308 assert(0 && "What is the exception handler register");
1312 #include "ARMGenRegisterInfo.inc"