1 //===- ThumbRegisterInfo.cpp - Thumb Register Information -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the ARM implementation of the TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMMachineFunctionInfo.h"
17 #include "ARMSubtarget.h"
18 #include "ThumbInstrInfo.h"
19 #include "ThumbRegisterInfo.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/CodeGen/MachineConstantPool.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineLocation.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/Target/TargetFrameInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/Support/CommandLine.h"
// Command-line toggle gating register scavenging for Thumb functions;
// read by requiresRegisterScavenging() below.
// NOTE(review): the cl::opt declaration line appears elided in this excerpt.
ThumbRegScavenging("enable-thumb-reg-scavenging",
                   cl::desc("Enable register scavenging on Thumb"));
// Construct the Thumb register info, forwarding the instruction info and
// subtarget to the shared ARM base implementation.
ThumbRegisterInfo::ThumbRegisterInfo(const TargetInstrInfo &tii,
                                     const ARMSubtarget &sti)
  : ARMBaseRegisterInfo(tii, sti) {
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ThumbRegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI,
                                          unsigned DestReg, int Val,
                                          const TargetInstrInfo *TII,
  // Put Val into this function's constant pool (4-byte aligned) and emit a
  // tLDRcp (Thumb load-from-constant-pool) into DestReg at MBBI.
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  Constant *C = ConstantInt::get(Type::Int32Ty, Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
  BuildMI(MBB, MBBI, dl, TII->get(ARM::tLDRcp), DestReg)
    .addConstantPoolIndex(Idx);
/// getPhysicalRegisterRegClass - Map a physical register to a register class.
/// Low registers (r0-r7) get the restricted Thumb class; the remaining core
/// registers get the full GPR class; anything else falls back to the
/// target-independent lookup.
const TargetRegisterClass*
ThumbRegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, MVT VT) const {
  if (isARMLowRegister(Reg))
    return ARM::tGPRRegisterClass;
    case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
    case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
      return ARM::GPRRegisterClass;
  // Not a core integer register (e.g. FP/VFP) — defer to the base class.
  return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
// Scavenging on Thumb is opt-in via the -enable-thumb-reg-scavenging flag.
ThumbRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
  return ThumbRegScavenging;
/// hasReservedCallFrame - Return true if the call frame can be folded into
/// the fixed stack frame (i.e. call-frame pseudos need no dynamic SP
/// adjustment).
bool ThumbRegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  const MachineFrameInfo *FFI = MF.getFrameInfo();
  unsigned CFSize = FFI->getMaxCallFrameSize();
  // It's not always a good idea to include the call frame as part of the
  // stack frame. ARM (especially Thumb) has small immediate offset to
  // address the stack frame. So a large call frame can cause poor codegen
  // and may even makes it impossible to scavenge a register.
  if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
  // Dynamic allocas force dynamic SP adjustment, so the call frame cannot
  // be reserved when the function has variable-sized objects.
  return !MF.getFrameInfo()->hasVarSizedObjects();
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              unsigned DestReg, unsigned BaseReg,
                              int NumBytes, bool CanChangeCC,
                              const TargetInstrInfo &TII,
                              const ThumbRegisterInfo& MRI,
  // A high destination or base register forces the hi-reg add form
  // (tADDhirr) below.
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  // Subtract doesn't have high register version. Load the negative value
  // if either base or dest register is a high register. Also, if do not
  // issue sub as part of the sequence if condition register is to be
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    NumBytes = -NumBytes;
  unsigned LdReg = DestReg;
  if (DestReg == ARM::SP) {
    assert(BaseReg == ARM::SP && "Unexpected!");
    // Stash R3 in R12 so R3 can serve as the scratch register when the
    // destination is SP; R3 is restored from R12 below.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
      .addReg(ARM::R3, RegState::Kill);
  // Materialize the immediate into LdReg: tMOVi8 for 0..255, tMOVi8+tNEG
  // for -255..-1, otherwise fall back to a constant-pool load.
  if (NumBytes <= 255 && NumBytes >= 0)
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
  else if (NumBytes < 0 && NumBytes >= -255) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), LdReg)
      .addReg(LdReg, RegState::Kill);
    MRI.emitLoadConstPool(MBB, MBBI, LdReg, NumBytes, &TII, dl);
  // Emit the register-register add / sub combining BaseReg and LdReg.
  int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
  const MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl,
                                          TII.get(Opc), DestReg);
  // Operand order differs for the SP/subtract case vs. the plain add case.
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  if (DestReg == ARM::SP)
    // Restore the R3 value saved in R12 above.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
      .addReg(ARM::R12, RegState::Kill);
/// calcNumMI - Returns the number of instructions required to materialize
/// the specific add / sub r, c instruction.
static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
                          unsigned NumBits, unsigned Scale) {
  // Largest immediate a single instruction of this encoding can carry.
  unsigned Chunk = ((1 << NumBits) - 1) * Scale;
  if (Opc == ARM::tADDrSPi) {
    unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
    Scale = 1; // Followed by a number of tADDi8.
    Chunk = ((1 << NumBits) - 1) * Scale;
  // One instruction per full chunk, plus one more for any remainder.
  NumMIs += Bytes / Chunk;
  if ((Bytes % Chunk) != 0)
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code.
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI,
                               unsigned DestReg, unsigned BaseReg,
                               int NumBytes, const TargetInstrInfo &TII,
                               const ThumbRegisterInfo& MRI,
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;   // magnitude of the offset
  if (isSub) Bytes = -NumBytes;
  bool isMul4 = (Bytes & 3) == 0;
  bool isTwoAddr = false;
  bool DstNotEqBase = false;
  unsigned NumBits = 1;
  // Select opcode / immediate encoding based on which registers are
  // involved; SP-relative forms have different immediate ranges.
  if (DestReg == BaseReg && BaseReg == ARM::SP) {
    assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
    Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
  } else if (!isSub && BaseReg == ARM::SP) {
    // r1 = add sp, 100 * 4
    ExtraOpc = ARM::tADDi3;
    if (DestReg != BaseReg)
    Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
  // If the chunked expansion would be too long, materialize the immediate
  // in a register and use a single reg-reg add/sub instead.
  unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
  if (NumMIs > Threshold) {
    // This will expand into too many instructions. Load the immediate from a
    emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII,
  if (isARMLowRegister(DestReg) && isARMLowRegister(BaseReg)) {
    // If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
    unsigned Chunk = (1 << 3) - 1;
    unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
    BuildMI(MBB, MBBI, dl,TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
      .addReg(BaseReg, RegState::Kill);
  // Consume the offset in chunks of at most Chunk bytes per instruction.
  unsigned Chunk = ((1 << NumBits) - 1) * Scale;
    unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
    // Build the new tADD / tSUB.
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
        .addReg(DestReg).addImm(ThisVal);
      bool isKill = BaseReg != ARM::SP;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
        .addReg(BaseReg, getKillRegState(isKill)).addImm(ThisVal);
    if (Opc == ARM::tADDrSPi) {
      // After the first SP-relative add, continue with the i8 forms.
      Chunk = ((1 << NumBits) - 1) * Scale;
      Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
  // Handle the remaining sub-word (low two bits) part of the offset.
  BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg)
    .addReg(DestReg, RegState::Kill)
    .addImm(((unsigned)NumBytes) & 3);
/// emitSPUpdate - Adjust SP by NumBytes (positive grows away from the
/// frame, negative allocates) by delegating to emitThumbRegPlusImmediate
/// with SP as both source and destination.
static void emitSPUpdate(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MBBI,
                         const TargetInstrInfo &TII, DebugLoc dl,
                         const ThumbRegisterInfo &MRI,
  emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII,
/// eliminateCallFramePseudoInstr - Replace ADJCALLSTACKDOWN/UP pseudo
/// instructions with real SP adjustments when the call frame is not
/// reserved (e.g. the function uses alloca).
void ThumbRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
    Amount = (Amount+Align-1)/Align*Align;
    // Replace the pseudo instruction with a new instruction...
    unsigned Opc = Old->getOpcode();
    if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
      emitSPUpdate(MBB, I, TII, dl, *this, -Amount);
      assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
      emitSPUpdate(MBB, I, TII, dl, *this, Amount);
/// emitThumbConstant - Emit a series of instructions to materialize a
static void emitThumbConstant(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              unsigned DestReg, int Imm,
                              const TargetInstrInfo &TII,
                              const ThumbRegisterInfo& MRI,
  // Work with the magnitude; the tNEG at the end restores the sign when
  // the original constant was negative.
  bool isSub = Imm < 0;
  if (isSub) Imm = -Imm;
  // tMOVi8 covers the low 8 bits; the remainder (if any) is added on top
  // via emitThumbRegPlusImmediate.
  int Chunk = (1 << 8) - 1;
  int ThisVal = (Imm > Chunk) ? Chunk : Imm;
  BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
  emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI, dl);
  BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), DestReg)
    .addReg(DestReg, RegState::Kill);
/// eliminateFrameIndex - Rewrite an abstract frame-index operand in MI into
/// a concrete base register (SP or FP) plus immediate offset, emitting
/// extra instructions when the offset does not fit the instruction's
/// immediate field.
void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, RegScavenger *RS) const{
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc dl = MI.getDebugLoc();
  // Locate the frame-index operand of MI.
  while (!MI.getOperand(i).isFI()) {
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  unsigned FrameReg = ARM::SP;
  int FrameIndex = MI.getOperand(i).getIndex();
  // Raw SP-relative offset of the object, including any pending SP
  // adjustment from call-frame setup.
  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
               MF.getFrameInfo()->getStackSize() + SPAdj;
  // Adjust the offset for the callee-saved spill area the index lives in,
  // or switch to FP-relative addressing when the function has a frame ptr.
  if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
    Offset -= AFI->getGPRCalleeSavedArea1Offset();
  else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
    Offset -= AFI->getGPRCalleeSavedArea2Offset();
  else if (hasFP(MF)) {
    assert(SPAdj == 0 && "Unexpected");
    // There is alloca()'s in this function, must reference off the frame
    FrameReg = getFrameRegister(MF);
    Offset -= AFI->getFramePtrSpillOffset();
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  // Case 1: tADDrSPi (reg = sp + imm).
  if (Opcode == ARM::tADDrSPi) {
    Offset += MI.getOperand(i+1).getImm();
    // Can't use tADDrSPi if it's based off the frame pointer.
    unsigned NumBits = 0;
    if (FrameReg != ARM::SP) {
      Opcode = ARM::tADDi3;
      MI.setDesc(TII.get(ARM::tADDi3));
    assert((Offset & 3) == 0 &&
           "Thumb add/sub sp, #imm immediate must be multiple of 4!");
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVhir2lor));
      MI.getOperand(i).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(i+1);
    // Common case: small offset, fits into instruction.
    unsigned Mask = (1 << NumBits) - 1;
    if (((Offset / Scale) & ~Mask) == 0) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(i).ChangeToRegister(FrameReg, false);
      MI.getOperand(i+1).ChangeToImmediate(Offset / Scale);
    unsigned DestReg = MI.getOperand(0).getReg();
    unsigned Bytes = (Offset > 0) ? Offset : -Offset;
    unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, NumBits, Scale);
    // MI would expand into a large number of instructions. Don't try to
    // simplify the immediate.
      emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII,
      // Translate r0 = add sp, imm to
      // r0 = add sp, 255*4
      // r0 = add r0, (imm - 255*4)
      MI.getOperand(i).ChangeToRegister(FrameReg, false);
      MI.getOperand(i+1).ChangeToImmediate(Mask);
      Offset = (Offset - Mask * Scale);
      MachineBasicBlock::iterator NII = next(II);
      emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII,
      // Translate r0 = add sp, -imm to
      // r0 = -imm (this is then translated into a series of instructons)
      emitThumbConstant(MBB, II, DestReg, Offset, TII, *this, dl);
      MI.setDesc(TII.get(ARM::tADDhirr));
      MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
      MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
    // Case 2: load/store with an immediate-offset addressing mode.
    unsigned NumBits = 0;
    case ARMII::AddrModeT1_s: {
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      // SP-relative encodings get 8 bits of immediate; others get 5.
      NumBits = (FrameReg == ARM::SP) ? 8 : 5;
      assert(0 && "Unsupported addressing mode!");
    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with sp
      MI.getOperand(i).ChangeToRegister(FrameReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);
    bool isThumSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
    if (AddrMode == ARMII::AddrModeT1_s) {
      // Thumb tLDRspi, tSTRspi. These will change to instructions that use
      // a different base register.
      Mask = (1 << NumBits) - 1;
    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (AddrMode == ARMII::AddrModeT1_s && isThumSpillRestore)
      ImmOp.ChangeToImmediate(0);
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  assert(Offset && "This code isn't needed if offset already handled!");
  if (Desc.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    unsigned TmpReg = MI.getOperand(0).getReg();
    if (Opcode == ARM::tRestore) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
                                 Offset, false, TII, *this, dl);
        emitLoadConstPool(MBB, II, TmpReg, Offset, &TII, dl);
      emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
    MI.setDesc(TII.get(ARM::tLDR));
    MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
      // Use [reg, reg] addrmode.
      MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
    else // tLDR has an extra register operand.
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else if (Desc.mayStore()) {
    // FIXME! This is horrific!!! We need register scavenging.
    // Our temporary workaround has marked r3 unavailable. Of course, r3 is
    // also a ABI register so it's possible that is is the register that is
    // being storing here. If that's the case, we do the following:
    // Use r2 to materialize sp + offset
    unsigned ValReg = MI.getOperand(0).getReg();
    unsigned TmpReg = ARM::R3;
    // If R3 is the value being stored (or is live-in), save it in R12 so it
    // can be used as the scratch register; it is restored after the store.
    if (ValReg == ARM::R3) {
      BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
        .addReg(ARM::R2, RegState::Kill);
    if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
      BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
        .addReg(ARM::R3, RegState::Kill);
    if (Opcode == ARM::tSpill) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
                                 Offset, false, TII, *this, dl);
        emitLoadConstPool(MBB, II, TmpReg, Offset, &TII, dl);
      emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
    MI.setDesc(TII.get(ARM::tSTR));
    MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR) // Use [reg, reg] addrmode.
      MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
    else // tSTR has an extra register operand.
      MI.addOperand(MachineOperand::CreateReg(0, false));
    // Restore the registers saved in R12 above, after the store.
    MachineBasicBlock::iterator NII = next(II);
    if (ValReg == ARM::R3)
      BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R2)
        .addReg(ARM::R12, RegState::Kill);
    if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
      BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
        .addReg(ARM::R12, RegState::Kill);
    assert(false && "Unexpected opcode!");
/// emitPrologue - Insert prologue code into the function's entry block:
/// allocate the stack frame, record callee-saved spill areas, and set up
/// the frame pointer when required.
void ThumbRegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = (MBBI != MBB.end() ?
                 MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());
  // Check if R3 is live in. It might have to be used as a scratch register.
  for (MachineRegisterInfo::livein_iterator I =MF.getRegInfo().livein_begin(),
       E = MF.getRegInfo().livein_end(); I != E; ++I) {
    if (I->first == ARM::R3) {
      AFI->setR3IsLiveIn(true);
  // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
  NumBytes = (NumBytes + 3) & ~3;
  MFI->setStackSize(NumBytes);
  // Determine the sizes of each callee-save spill areas and record which frame
  // belongs to which callee-save spill areas.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;
    emitSPUpdate(MBB, MBBI, TII, dl, *this, -VARegSaveSize);
  if (!AFI->hasStackFrame()) {
      emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
  // Classify each callee-saved register's frame index into spill area 1,
  // area 2 (Darwin), or the DPR area, noting where FP is spilled.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
        FramePtrSpillFI = FI;
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        AFI->addGPRCalleeSavedArea1Frame(FI);
      AFI->addDPRCalleeSavedAreaFrame(FI);
  // Skip past the tPUSH of callee-saved registers, if present.
  if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
    if (MBBI != MBB.end())
      dl = MBBI->getDebugLoc();
  // Darwin ABI requires FP to point to the stack slot that contains the
  if (STI.isTargetDarwin() || hasFP(MF)) {
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
        .addFrameIndex(FramePtrSpillFI).addImm(0);
  // Determine starting offsets of spill areas.
  unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
  NumBytes = DPRCSOffset;
    // Insert it after all the callee-save spills.
    emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
/// isCalleeSavedRegister - Returns true if Reg appears in the
/// null-terminated CSRegs array.
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
/// isCSRestore - Returns true if MI is a tRestore of a callee-saved
/// register from a frame index; used by emitEpilogue to skip over the
/// callee-saved restore sequence.
static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
  return (MI->getOpcode() == ARM::tRestore &&
          MI->getOperand(1).isFI() &&
          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
/// emitEpilogue - Insert epilogue code before the return instruction of
/// MBB: deallocate the stack frame and handle the vararg register-save
/// area.
void ThumbRegisterInfo::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert((MBBI->getOpcode() == ARM::tBX_RET ||
          MBBI->getOpcode() == ARM::tPOP_RET) &&
         "Can only insert epilog into returning blocks");
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();
  if (!AFI->hasStackFrame()) {
      emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
    // Unwind MBBI to point to first LDR / FLDD.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
      if (!isCSRestore(MBBI, CSRegs))
    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      // Reset SP based on frame pointer only if the stack frame extends beyond
      // frame pointer stack slot or target is ELF and the function has FP.
        emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
        BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::SP)
      // If the return is preceded by a tPOP, place the SP update before it
      // so the pop still reads the restored values.
      if (MBBI->getOpcode() == ARM::tBX_RET &&
          &MBB.front() != MBBI &&
          prior(MBBI)->getOpcode() == ARM::tPOP) {
        MachineBasicBlock::iterator PMBBI = prior(MBBI);
        emitSPUpdate(MBB, PMBBI, TII, dl, *this, NumBytes);
        emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
    // Epilogue for vararg functions: pop LR to R3 and branch off it.
    // FIXME: Verify this is still ok when R3 is no longer being reserved.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)).addReg(ARM::R3);
    emitSPUpdate(MBB, MBBI, TII, dl, *this, VARegSaveSize);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);