1 //===- PPCRegisterInfo.cpp - PowerPC Register Information -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the PowerPC implementation of the TargetRegisterInfo
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "reginfo"
17 #include "PPCInstrBuilder.h"
18 #include "PPCMachineFunctionInfo.h"
19 #include "PPCRegisterInfo.h"
20 #include "PPCFrameInfo.h"
21 #include "PPCSubtarget.h"
22 #include "llvm/CallingConv.h"
23 #include "llvm/Constants.h"
24 #include "llvm/Function.h"
25 #include "llvm/Type.h"
26 #include "llvm/CodeGen/ValueTypes.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineLocation.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/RegisterScavenging.h"
34 #include "llvm/Target/TargetFrameInfo.h"
35 #include "llvm/Target/TargetInstrInfo.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/ADT/BitVector.h"
42 #include "llvm/ADT/STLExtras.h"
46 // FIXME This disables some code that aligns the stack to a boundary
47 // bigger than the default (16 bytes on Darwin) when there is a stack local
48 // of greater alignment. This does not currently work, because the delta
49 // between old and new stack pointers is added to offsets that reference
50 // incoming parameters after the prolog is generated, and the code that
51 // does that doesn't handle a variable delta. You don't want to do that
52 // anyway; a better approach is to reserve another register that points
53 // to the incoming stack pointer, and reference parameters relative to that.
56 // FIXME (64-bit): Eventually enable by default.
// Command-line switches that turn on the register scavenger for 32-bit and
// 64-bit PPC respectively.  NOTE(review): the cl::init/cl::Hidden argument
// lines are missing from this listing — presumably both default to false.
57 cl::opt<bool> EnablePPC32RS("enable-ppc32-regscavenger",
59 cl::desc("Enable PPC32 register scavenger"),
61 cl::opt<bool> EnablePPC64RS("enable-ppc64-regscavenger",
63 cl::desc("Enable PPC64 register scavenger"),
// True when scavenging is requested for the current subtarget's word size.
// A macro (not a function) because it relies on a 'Subtarget' member being
// in scope at each expansion site inside PPCRegisterInfo methods.
65 #define EnableRegisterScavenging \
66 ((EnablePPC32RS && !Subtarget.isPPC64()) || \
67 (EnablePPC64RS && Subtarget.isPPC64()))
69 // FIXME (64-bit): Should be inlined.
// Tell the generic frame-lowering code whether to run the register
// scavenger for this function; driven entirely by the command-line
// flags via the EnableRegisterScavenging macro above.
71 PPCRegisterInfo::requiresRegisterScavenging(const MachineFunction &) const {
72 return EnableRegisterScavenging;
75 /// getRegisterNumbering - Given the enum value for some register, e.g.
76 /// PPC::F14, return the number that it corresponds to (e.g. 14).
77 unsigned PPCRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
// Each architectural number is shared by the 32-bit GPR (R), 64-bit GPR (X),
// FP (F), vector (V) and condition (CR) register files.  The CR *bit*
// registers (CRnLT/GT/EQ/UN) map to their bit position within the 32-bit CR
// image, i.e. CRn*4 + {0,1,2,3}.
81 case R0 : case X0 : case F0 : case V0 : case CR0: case CR0LT: return 0;
82 case R1 : case X1 : case F1 : case V1 : case CR1: case CR0GT: return 1;
83 case R2 : case X2 : case F2 : case V2 : case CR2: case CR0EQ: return 2;
84 case R3 : case X3 : case F3 : case V3 : case CR3: case CR0UN: return 3;
85 case R4 : case X4 : case F4 : case V4 : case CR4: case CR1LT: return 4;
86 case R5 : case X5 : case F5 : case V5 : case CR5: case CR1GT: return 5;
87 case R6 : case X6 : case F6 : case V6 : case CR6: case CR1EQ: return 6;
88 case R7 : case X7 : case F7 : case V7 : case CR7: case CR1UN: return 7;
89 case R8 : case X8 : case F8 : case V8 : case CR2LT: return 8;
90 case R9 : case X9 : case F9 : case V9 : case CR2GT: return 9;
91 case R10: case X10: case F10: case V10: case CR2EQ: return 10;
92 case R11: case X11: case F11: case V11: case CR2UN: return 11;
93 case R12: case X12: case F12: case V12: case CR3LT: return 12;
94 case R13: case X13: case F13: case V13: case CR3GT: return 13;
95 case R14: case X14: case F14: case V14: case CR3EQ: return 14;
96 case R15: case X15: case F15: case V15: case CR3UN: return 15;
97 case R16: case X16: case F16: case V16: case CR4LT: return 16;
98 case R17: case X17: case F17: case V17: case CR4GT: return 17;
99 case R18: case X18: case F18: case V18: case CR4EQ: return 18;
100 case R19: case X19: case F19: case V19: case CR4UN: return 19;
101 case R20: case X20: case F20: case V20: case CR5LT: return 20;
102 case R21: case X21: case F21: case V21: case CR5GT: return 21;
103 case R22: case X22: case F22: case V22: case CR5EQ: return 22;
104 case R23: case X23: case F23: case V23: case CR5UN: return 23;
105 case R24: case X24: case F24: case V24: case CR6LT: return 24;
106 case R25: case X25: case F25: case V25: case CR6GT: return 25;
107 case R26: case X26: case F26: case V26: case CR6EQ: return 26;
108 case R27: case X27: case F27: case V27: case CR6UN: return 27;
109 case R28: case X28: case F28: case V28: case CR7LT: return 28;
110 case R29: case X29: case F29: case V29: case CR7GT: return 29;
111 case R30: case X30: case F30: case V30: case CR7EQ: return 30;
112 case R31: case X31: case F31: case V31: case CR7UN: return 31;
// Unhandled enum values report an error; this is a hard failure, not a
// recoverable condition (the default case lines are missing from this view).
114 cerr << "Unhandled reg in PPCRegisterInfo::getRegisterNumbering!\n";
// Constructor: records the subtarget and instruction info, tells the
// TableGen'd base class which pseudo opcodes delimit call frames, and builds
// the D-form (reg+imm) -> X-form (reg+reg) opcode map used by
// eliminateFrameIndex when a frame offset does not fit in a 16-bit immediate.
119 PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
120 const TargetInstrInfo &tii)
121 : PPCGenRegisterInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
122 Subtarget(ST), TII(tii) {
// 32-bit (and common) memory/arith opcodes.
123 ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
124 ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
125 ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
126 ImmToIdxMap[PPC::LWZ] = PPC::LWZX; ImmToIdxMap[PPC::LWA] = PPC::LWAX;
127 ImmToIdxMap[PPC::LFS] = PPC::LFSX; ImmToIdxMap[PPC::LFD] = PPC::LFDX;
128 ImmToIdxMap[PPC::STH] = PPC::STHX; ImmToIdxMap[PPC::STW] = PPC::STWX;
129 ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
130 ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
// 64-bit variants of the same mappings.
133 ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
134 ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
135 ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
136 ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
137 ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; ImmToIdxMap[PPC::STD_32] = PPC::STDX_32;
// getCalleeSavedRegs - Return the ABI-appropriate list of registers the
// callee must preserve, selected by Macho-vs-ELF and 32-vs-64 bit.
// NOTE(review): the return-type line of this definition is missing from this
// listing.
141 PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
142 // 32-bit Darwin calling convention.
// GPRs r13-r31, FPRs from f14 upward, CR fields CR2-CR4 (and their
// individual bits), vector regs v20-v31.
143 static const unsigned Macho32_CalleeSavedRegs[] = {
144 PPC::R13, PPC::R14, PPC::R15,
145 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
146 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
147 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
148 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
150 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
151 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
152 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
153 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
156 PPC::CR2, PPC::CR3, PPC::CR4,
157 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
158 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
159 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
161 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
162 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
163 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
// 32-bit SVR4/ELF convention: like Darwin but f10-f13 are additionally
// listed here.
168 static const unsigned ELF32_CalleeSavedRegs[] = {
169 PPC::R13, PPC::R14, PPC::R15,
170 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
171 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
172 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
173 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
176 PPC::F10, PPC::F11, PPC::F12, PPC::F13,
177 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
178 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
179 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
180 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
183 PPC::CR2, PPC::CR3, PPC::CR4,
184 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
185 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
186 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
188 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
189 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
190 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
194 // 64-bit Darwin calling convention.
// Same shape as the 32-bit Darwin list but uses the 64-bit X GPRs.
195 static const unsigned Macho64_CalleeSavedRegs[] = {
197 PPC::X16, PPC::X17, PPC::X18, PPC::X19,
198 PPC::X20, PPC::X21, PPC::X22, PPC::X23,
199 PPC::X24, PPC::X25, PPC::X26, PPC::X27,
200 PPC::X28, PPC::X29, PPC::X30, PPC::X31,
202 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
203 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
204 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
205 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
208 PPC::CR2, PPC::CR3, PPC::CR4,
209 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
210 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
211 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
213 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
214 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
215 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
220 if (Subtarget.isMachoABI())
221 return Subtarget.isPPC64() ? Macho64_CalleeSavedRegs :
222 Macho32_CalleeSavedRegs;
// Non-Macho falls back to the 32-bit ELF list; there is no separate 64-bit
// ELF table in this function.
225 return ELF32_CalleeSavedRegs;
// getCalleeSavedRegClasses - Return register classes positionally parallel
// to the lists produced by getCalleeSavedRegs above; each table is
// null-terminated.  Must be kept in sync with that function.
228 const TargetRegisterClass* const*
229 PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
230 // 32-bit Macho calling convention.
231 static const TargetRegisterClass * const Macho32_CalleeSavedRegClasses[] = {
232 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
233 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
234 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
235 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
236 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
238 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
239 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
240 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
241 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
242 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
244 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
246 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
247 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
248 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
250 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
251 &PPC::CRBITRCRegClass,
252 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
253 &PPC::CRBITRCRegClass,
254 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
255 &PPC::CRBITRCRegClass,
257 &PPC::GPRCRegClass, 0
// 32-bit ELF convention (parallel to ELF32_CalleeSavedRegs, which includes
// f10-f13, hence the extra F8RC entries).
260 static const TargetRegisterClass * const ELF32_CalleeSavedRegClasses[] = {
261 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
262 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
263 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
264 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
265 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
268 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
269 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
270 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
271 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
272 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
273 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
275 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
277 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
278 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
279 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
281 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
282 &PPC::CRBITRCRegClass,
283 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
284 &PPC::CRBITRCRegClass,
285 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
286 &PPC::CRBITRCRegClass,
288 &PPC::GPRCRegClass, 0
291 // 64-bit Macho calling convention.
// Same as the 32-bit table but GPR entries use the 64-bit G8RC class.
292 static const TargetRegisterClass * const Macho64_CalleeSavedRegClasses[] = {
293 &PPC::G8RCRegClass,&PPC::G8RCRegClass,
294 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
295 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
296 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
297 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
299 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
300 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
301 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
302 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
303 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
305 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
307 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
308 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
309 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
311 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
312 &PPC::CRBITRCRegClass,
313 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
314 &PPC::CRBITRCRegClass,
315 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
316 &PPC::CRBITRCRegClass,
318 &PPC::G8RCRegClass, 0
321 if (Subtarget.isMachoABI())
322 return Subtarget.isPPC64() ? Macho64_CalleeSavedRegClasses :
323 Macho32_CalleeSavedRegClasses;
// Non-Macho falls back to the 32-bit ELF class table.
326 return ELF32_CalleeSavedRegClasses;
329 // needsFP - Return true if the specified function should have a dedicated frame
330 // pointer register. This is true if the function has variable sized allocas or
331 // if frame pointer elimination is disabled.
333 static bool needsFP(const MachineFunction &MF) {
334 const MachineFrameInfo *MFI = MF.getFrameInfo();
335 return NoFramePointerElim || MFI->hasVarSizedObjects() ||
336 (PerformTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
339 static bool spillsCR(const MachineFunction &MF) {
340 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
341 return FuncInfo->isCRSpilled();
// getReservedRegs - Mark registers the allocator must never hand out:
// R0 (treated as zero by addi/addis), R1 (stack pointer), the link
// registers, the rounding-mode register, plus ABI/subtarget-specific
// reservations below.
344 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
345 BitVector Reserved(getNumRegs());
346 Reserved.set(PPC::R0);
347 Reserved.set(PPC::R1);
348 Reserved.set(PPC::LR);
349 Reserved.set(PPC::LR8);
350 Reserved.set(PPC::RM);
352 // In Linux, r2 is reserved for the OS.
353 if (!Subtarget.isDarwin())
354 Reserved.set(PPC::R2);
356 // On PPC64, r13 is the thread pointer. Never allocate this register. Note
357 // that this is over conservative, as it also prevents allocation of R31 when
358 // the FP is not needed.
359 if (Subtarget.isPPC64()) {
360 Reserved.set(PPC::R13);
361 Reserved.set(PPC::R31);
// Without the scavenger there is no way to materialize large frame offsets,
// so R0 must stay reserved as the emergency temporary.
363 if (!EnableRegisterScavenging)
364 Reserved.set(PPC::R0); // FIXME (64-bit): Remove
// Reserve the 64-bit aliases of the registers reserved above.
366 Reserved.set(PPC::X0);
367 Reserved.set(PPC::X1);
368 Reserved.set(PPC::X13);
369 Reserved.set(PPC::X31);
// NOTE(review): lines between here and the next statement are missing from
// this listing — presumably this R31 reservation is guarded by a frame
// pointer check for the 32-bit case; confirm against upstream.
373 Reserved.set(PPC::R31);
378 //===----------------------------------------------------------------------===//
379 // Stack Frame Processing methods
380 //===----------------------------------------------------------------------===//
382 // hasFP - Return true if the specified function actually has a dedicated frame
383 // pointer register. This is true if the function needs a frame pointer and has
384 // a non-zero stack size.
385 bool PPCRegisterInfo::hasFP(const MachineFunction &MF) const {
386 const MachineFrameInfo *MFI = MF.getFrameInfo();
387 return MFI->getStackSize() && needsFP(MF);
390 /// MustSaveLR - Return true if this function requires that we save the LR
391 /// register onto the stack in the prolog and restore it in the epilog of the
393 static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
394 const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
396 // We need a save/restore of LR if there is any def of LR (which is
397 // defined by calls, including the PIC setup sequence), or if there is
398 // some use of the LR stack slot (e.g. for builtin_return_address).
399 // (LR comes in 32 and 64 bit versions.)
400 MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
401 return RI !=MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
// eliminateCallFramePseudoInstr - Remove the ADJCALLSTACKDOWN/UP pseudos.
// With tail-call optimization an ADJCALLSTACKUP may carry a callee-popped
// amount that must be subtracted back from the stack pointer here.
406 void PPCRegisterInfo::
407 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
408 MachineBasicBlock::iterator I) const {
409 if (PerformTailCallOpt && I->getOpcode() == PPC::ADJCALLSTACKUP) {
410 // Add (actually subtract) back the amount the callee popped on return.
411 if (int CalleeAmt = I->getOperand(1).getImm()) {
412 bool is64Bit = Subtarget.isPPC64();
// Select 32- or 64-bit registers and opcodes for the adjustment sequence.
414 unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
415 unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
416 unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
417 unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
418 unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
419 unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
// Small amounts fit in a single addi immediate.
421 if (isInt16(CalleeAmt)) {
422 BuildMI(MBB, I, TII.get(ADDIInstr), StackReg).addReg(StackReg).
// Otherwise materialize the 32-bit amount with lis/ori, then add it.
425 MachineBasicBlock::iterator MBBI = I;
426 BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
427 .addImm(CalleeAmt >> 16);
428 BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
429 .addReg(TmpReg, false, false, true)
430 .addImm(CalleeAmt & 0xFFFF);
431 BuildMI(MBB, MBBI, TII.get(ADDInstr))
438 // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
442 /// findScratchRegister - Find a 'free' PPC register. Try for a call-clobbered
443 /// register first and then a spilled callee-saved register if that fails.
445 unsigned findScratchRegister(MachineBasicBlock::iterator II, RegScavenger *RS,
446 const TargetRegisterClass *RC, int SPAdj) {
447 assert(RS && "Register scavenging must be on");
// First try to grab a register that is simply unused at this point.
448 unsigned Reg = RS->FindUnusedReg(RC, true);
449 // FIXME: move ARM callee-saved reg scan to target independent code, then
450 // search for already spilled CS register here.
// Fall back to actively scavenging one (the guarding condition line is
// missing from this listing — presumably it checks Reg == 0 first).
452 Reg = RS->scavengeRegister(RC, II, SPAdj);
456 /// lowerDynamicAlloc - Generate the code for allocating an object in the
457 /// current frame. The sequence of code will be in the general form
459 /// addi R0, SP, #frameSize ; get the address of the previous frame
460 /// stwxu R0, SP, Rnegsize ; add and update the SP with the negated size
461 /// addi Rnew, SP, #maxCallFrameSize ; get the top of the allocation
463 void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
464 int SPAdj, RegScavenger *RS) const {
465 // Get the instruction.
466 MachineInstr &MI = *II;
467 // Get the instruction's basic block.
468 MachineBasicBlock &MBB = *MI.getParent();
469 // Get the basic block's function.
470 MachineFunction &MF = *MBB.getParent();
471 // Get the frame info.
472 MachineFrameInfo *MFI = MF.getFrameInfo();
473 // Determine whether 64-bit pointers are used.
474 bool LP64 = Subtarget.isPPC64();
476 // Get the maximum call stack size.
477 unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
478 // Get the total frame size.
479 unsigned FrameSize = MFI->getStackSize();
481 // Get stack alignments.
482 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
483 unsigned MaxAlign = MFI->getMaxAlignment();
484 assert(MaxAlign <= TargetAlign &&
485 "Dynamic alloca with large aligns not supported");
487 // Determine the previous frame's address. If FrameSize can't be
488 // represented as 16 bits or we need special alignment, then we load the
489 // previous frame's address from 0(SP). Why not do an addis of the hi?
490 // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
491 // Constructing the constant and adding would take 3 instructions.
492 // Fortunately, a frame greater than 32K is rare.
493 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
494 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
495 const TargetRegisterClass *RC = LP64 ? G8RC : GPRC;
497 // FIXME (64-bit): Use "findScratchRegister"
// With scavenging on, a scratch register is found dynamically; otherwise
// the reserved R0/X0 is used (declaration line missing from this listing).
499 if (EnableRegisterScavenging)
500 Reg = findScratchRegister(II, RS, RC, SPAdj);
// Small, normally-aligned frames: compute previous frame address directly.
504 if (MaxAlign < TargetAlign && isInt16(FrameSize)) {
505 BuildMI(MBB, II, TII.get(PPC::ADDI), Reg)
// Otherwise reload the back-chain pointer from 0(SP); LD for 64-bit,
// LWZ for 32-bit.
509 if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
510 BuildMI(MBB, II, TII.get(PPC::LD), Reg)
514 BuildMI(MBB, II, TII.get(PPC::LD), PPC::X0)
518 BuildMI(MBB, II, TII.get(PPC::LWZ), Reg)
523 // Grow the stack and update the stack pointer link, then determine the
524 // address of new allocated space.
526 if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
527 BuildMI(MBB, II, TII.get(PPC::STDUX))
528 .addReg(Reg, false, false, true)
530 .addReg(MI.getOperand(1).getReg())
532 BuildMI(MBB, II, TII.get(PPC::STDUX))
533 .addReg(PPC::X0, false, false, true)
535 .addReg(MI.getOperand(1).getReg())
// The allocated block starts just above the outgoing-call area; the result
// register gets SP + maxCallFrameSize.  When the negated-size operand dies
// here, an implicit kill is attached.
537 if (!MI.getOperand(1).isKill())
538 BuildMI(MBB, II, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
540 .addImm(maxCallFrameSize)
542 // Implicitly kill the register.
543 BuildMI(MBB, II, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
545 .addImm(maxCallFrameSize)
546 .addReg(MI.getOperand(1).getReg(), false, true, true)
// 32-bit path: same structure using STWUX/ADDI.
548 BuildMI(MBB, II, TII.get(PPC::STWUX))
549 .addReg(Reg, false, false, true)
551 .addReg(MI.getOperand(1).getReg())
553 if (!MI.getOperand(1).isKill())
554 BuildMI(MBB, II, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
556 .addImm(maxCallFrameSize)
558 // Implicitly kill the register.
559 BuildMI(MBB, II, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
561 .addImm(maxCallFrameSize)
562 .addReg(MI.getOperand(1).getReg(), false, true, true)
565 // Discard the DYNALLOC instruction.
569 /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
570 /// reserving a whole register (R0), we scrounge for one here. This generates
573 /// mfcr rA ; Move the conditional register into GPR rA.
574 /// rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
575 /// stw rA, FI ; Store rA to the frame.
577 void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
578 unsigned FrameIndex, int SPAdj,
579 RegScavenger *RS) const {
580 // Get the instruction.
581 MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset>, <FI>
582 // Get the instruction's basic block.
583 MachineBasicBlock &MBB = *MI.getParent();
// Scavenge a GPR of the appropriate width to hold the CR image.
585 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
586 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
587 const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
588 unsigned Reg = findScratchRegister(II, RS, RC, SPAdj);
590 // We need to store the CR in the low 4-bits of the saved value. First, issue
591 // an MFCR to save all of the CRBits. Add an implicit kill of the CR.
592 if (!MI.getOperand(0).isKill())
593 BuildMI(MBB, II, TII.get(PPC::MFCR), Reg);
595 // Implicitly kill the CR register.
596 BuildMI(MBB, II, TII.get(PPC::MFCR), Reg)
597 .addReg(MI.getOperand(0).getReg(), false, true, true);
599 // If the saved register wasn't CR0, shift the bits left so that they are in
// CR0's slot (4 bits per CR field; rotate amount is field number * 4).
601 unsigned SrcReg = MI.getOperand(0).getReg();
602 if (SrcReg != PPC::CR0)
603 // rlwinm rA, rA, ShiftBits, 0, 31.
604 BuildMI(MBB, II, TII.get(PPC::RLWINM), Reg)
605 .addReg(Reg, false, false, true)
606 .addImm(PPCRegisterInfo::getRegisterNumbering(SrcReg) * 4)
// Store the GPR holding the CR image to the frame slot.
610 addFrameReference(BuildMI(MBB, II, TII.get(PPC::STW))
611 .addReg(Reg, false, false, MI.getOperand(1).getImm()),
614 // Discard the pseudo instruction.
// eliminateFrameIndex - Rewrite an abstract frame-index operand into a
// concrete base register (SP or FP) plus offset, handling the DYNALLOC and
// SPILL_CR pseudos specially, and falling back to the X-form (reg+reg)
// opcode from ImmToIdxMap when the offset won't fit in 16 bits.
618 void PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
619 int SPAdj, RegScavenger *RS) const {
620 assert(SPAdj == 0 && "Unexpected");
622 // Get the instruction.
623 MachineInstr &MI = *II;
624 // Get the instruction's basic block.
625 MachineBasicBlock &MBB = *MI.getParent();
626 // Get the basic block's function.
627 MachineFunction &MF = *MBB.getParent();
628 // Get the frame info.
629 MachineFrameInfo *MFI = MF.getFrameInfo();
631 // Find out which operand is the frame index.
632 unsigned FIOperandNo = 0;
633 while (!MI.getOperand(FIOperandNo).isFI()) {
635 assert(FIOperandNo != MI.getNumOperands() &&
636 "Instr doesn't have FrameIndex operand!");
638 // Take into account whether it's an add or mem instruction
639 unsigned OffsetOperandNo = (FIOperandNo == 2) ? 1 : 2;
640 if (MI.getOpcode() == TargetInstrInfo::INLINEASM)
641 OffsetOperandNo = FIOperandNo-1;
643 // Get the frame index.
644 int FrameIndex = MI.getOperand(FIOperandNo).getIndex();
646 // Get the frame pointer save index. Users of this index are primarily
647 // DYNALLOC instructions.
648 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
649 int FPSI = FI->getFramePointerSaveIndex();
650 // Get the instruction opcode.
651 unsigned OpC = MI.getOpcode();
653 // Special case for dynamic alloca.
654 if (FPSI && FrameIndex == FPSI &&
655 (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
656 lowerDynamicAlloc(II, SPAdj, RS);
660 // Special case for pseudo-op SPILL_CR.
661 if (EnableRegisterScavenging) // FIXME (64-bit): Enable by default.
662 if (OpC == PPC::SPILL_CR) {
663 lowerCRSpilling(II, FrameIndex, SPAdj, RS);
667 // Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
668 MI.getOperand(FIOperandNo).ChangeToRegister(hasFP(MF) ? PPC::R31 : PPC::R1,
671 // Figure out if the offset in the instruction is shifted right two bits. This
672 // is true for instructions like "STD", which the machine implicitly adds two
// low zero bits to the encoded DS-form displacement.  (The opcode checks
// that set isIXAddr are missing from this listing.)
674 bool isIXAddr = false;
684 // Now add the frame object offset to the offset from r1.
685 int Offset = MFI->getObjectOffset(FrameIndex);
687 Offset += MI.getOperand(OffsetOperandNo).getImm();
// For DS-form ("ix") addressing, the stored immediate represents a
// 4-byte-scaled displacement, so scale it up before combining.
689 Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
691 // If we're not using a Frame Pointer that has been set to the value of the
692 // SP before having the stack size subtracted from it, then add the stack size
693 // to Offset to get the correct offset.
694 Offset += MFI->getStackSize();
696 // If we can, encode the offset directly into the instruction. If this is a
697 // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
698 // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
699 // clear can be encoded. This is extremely uncommon, because normally you
700 // only "std" to a stack slot that is at least 4-byte aligned, but it can
701 // happen in invalid code.
702 if (isInt16(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
704 Offset >>= 2; // The actual encoded value has the low two bits zero.
705 MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
709 // The offset doesn't fit into a single register, scavenge one to build the
711 // FIXME: figure out what SPAdj is doing here.
713 // FIXME (64-bit): Use "findScratchRegister".
// Without scavenging, the reserved R0 is used as the scratch register
// (declaration line missing from this listing).
715 if (EnableRegisterScavenging)
716 SReg = findScratchRegister(II, RS, &PPC::GPRCRegClass, SPAdj);
720 // Insert a set of rA with the full offset value before the ld, st, or add
721 BuildMI(MBB, II, TII.get(PPC::LIS), SReg)
722 .addImm(Offset >> 16);
723 BuildMI(MBB, II, TII.get(PPC::ORI), SReg)
724 .addReg(SReg, false, false, true)
727 // Convert into indexed form of the instruction:
729 // sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
730 // addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
731 unsigned OperandBase;
733 if (OpC != TargetInstrInfo::INLINEASM) {
734 assert(ImmToIdxMap.count(OpC) &&
735 "No indexed form of load or store available!");
736 unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
737 MI.setDesc(TII.get(NewOpcode));
// Inline asm keeps its opcode; only the operands are rewritten.
740 OperandBase = OffsetOperandNo;
// Rewrite the operands: base register first, scratch (index) second.
743 unsigned StackReg = MI.getOperand(FIOperandNo).getReg();
744 MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
745 MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false);
748 /// VRRegNo - Map from a numbered VR register to its enum value.
// Index i yields PPC::Vi; used by HandleVRSaveUpdate to translate bit
// positions in the VRSAVE mask to register enum values and back.
750 static const unsigned short VRRegNo[] = {
751 PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
752 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, PPC::V12, PPC::V13, PPC::V14, PPC::V15,
753 PPC::V16, PPC::V17, PPC::V18, PPC::V19, PPC::V20, PPC::V21, PPC::V22, PPC::V23,
754 PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
757 /// RemoveVRSaveCode - We have found that this function does not need any code
758 /// to manipulate the VRSAVE register, even though it uses vector registers.
759 /// This can happen when the only registers used are known to be live in or out
760 /// of the function. Remove all of the VRSAVE related code from the function.
761 static void RemoveVRSaveCode(MachineInstr *MI) {
762 MachineBasicBlock *Entry = MI->getParent();
763 MachineFunction *MF = Entry->getParent();
765 // We know that the MTVRSAVE instruction immediately follows MI. Remove it.
766 MachineBasicBlock::iterator MBBI = MI;
768 assert(MBBI != Entry->end() && MBBI->getOpcode() == PPC::MTVRSAVE);
769 MBBI->eraseFromParent();
771 bool RemovedAllMTVRSAVEs = true;
772 // See if we can find and remove the MTVRSAVE instruction from all of the
// epilog blocks (blocks ending in a return).
774 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
775 // If last instruction is a return instruction, add an epilogue
776 if (!I->empty() && I->back().getDesc().isReturn()) {
777 bool FoundIt = false;
// Scan backwards from the end of the block for the restoring MTVRSAVE.
778 for (MBBI = I->end(); MBBI != I->begin(); ) {
780 if (MBBI->getOpcode() == PPC::MTVRSAVE) {
781 MBBI->eraseFromParent(); // remove it.
786 RemovedAllMTVRSAVEs &= FoundIt;
790 // If we found and removed all MTVRSAVE instructions, remove the read of
// VRSAVE (the MFVRSAVE just before the UPDATE_VRSAVE) as well.
792 if (RemovedAllMTVRSAVEs) {
794 assert(MBBI != Entry->begin() && "UPDATE_VRSAVE is first instr in block?");
796 assert(MBBI->getOpcode() == PPC::MFVRSAVE && "VRSAVE instrs wandered?");
797 MBBI->eraseFromParent();
800 // Finally, nuke the UPDATE_VRSAVE.
801 MI->eraseFromParent();
804 // HandleVRSaveUpdate - MI is the UPDATE_VRSAVE instruction introduced by the
805 // instruction selector. Based on the vector registers that have been used,
806 // transform this into the appropriate ORI instruction.
807 static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) {
808 MachineFunction *MF = MI->getParent()->getParent();
// Build a mask with bit (31-i) set for each vector register Vi the
// function uses (VRSAVE numbers bits from the MSB down).
810 unsigned UsedRegMask = 0;
811 for (unsigned i = 0; i != 32; ++i)
812 if (MF->getRegInfo().isPhysRegUsed(VRRegNo[i]))
813 UsedRegMask |= 1 << (31-i);
815 // Live in and live out values already must be in the mask, so don't bother
// marking them again here.
817 for (MachineRegisterInfo::livein_iterator
818 I = MF->getRegInfo().livein_begin(),
819 E = MF->getRegInfo().livein_end(); I != E; ++I) {
820 unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(I->first);
821 if (VRRegNo[RegNo] == I->first) // If this really is a vector reg.
822 UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
824 for (MachineRegisterInfo::liveout_iterator
825 I = MF->getRegInfo().liveout_begin(),
826 E = MF->getRegInfo().liveout_end(); I != E; ++I) {
827 unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(*I);
828 if (VRRegNo[RegNo] == *I) // If this really is a vector reg.
829 UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
832 // If no registers are used, turn this into a copy.
833 if (UsedRegMask == 0) {
834 // Remove all VRSAVE code.
835 RemoveVRSaveCode(MI);
// UPDATE_VRSAVE operands: 0 = destination GPR, 1 = incoming VRSAVE value.
839 unsigned SrcReg = MI->getOperand(1).getReg();
840 unsigned DstReg = MI->getOperand(0).getReg();
// Emit the narrowest OR sequence that sets the used bits: a single ORI if
// only low bits are set, a single ORIS if only high bits, otherwise
// ORIS followed by ORI.  In each case the src operand is marked killed
// when the destination differs.
842 if ((UsedRegMask & 0xFFFF) == UsedRegMask) {
843 if (DstReg != SrcReg)
844 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
846 .addImm(UsedRegMask);
848 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
849 .addReg(SrcReg, false, false, true)
850 .addImm(UsedRegMask);
851 } else if ((UsedRegMask & 0xFFFF0000) == UsedRegMask) {
852 if (DstReg != SrcReg)
853 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
855 .addImm(UsedRegMask >> 16);
857 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
858 .addReg(SrcReg, false, false, true)
859 .addImm(UsedRegMask >> 16);
861 if (DstReg != SrcReg)
862 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
864 .addImm(UsedRegMask >> 16);
866 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
867 .addReg(SrcReg, false, false, true)
868 .addImm(UsedRegMask >> 16);
870 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
871 .addReg(DstReg, false, false, true)
872 .addImm(UsedRegMask & 0xFFFF);
875 // Remove the old UPDATE_VRSAVE instruction.
876 MI->eraseFromParent();
879 /// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size, round both up to the target stack alignment, and record the
/// results back into the MachineFrameInfo. Leaf functions that fit in the
/// red zone get a zero-sized frame.
881 void PPCRegisterInfo::determineFrameLayout(MachineFunction &MF) const {
882 MachineFrameInfo *MFI = MF.getFrameInfo();
884 // Get the number of bytes to allocate from the FrameInfo
885 unsigned FrameSize = MFI->getStackSize();
887 // Get the alignments provided by the target, and the maximum alignment
888 // (if any) of the fixed frame objects.
889 unsigned MaxAlign = MFI->getMaxAlignment();
890 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
891 unsigned AlignMask = TargetAlign - 1; // Mask for rounding up to TargetAlign.
893 // If we are a leaf function, and use up to 224 bytes of stack space,
894 // don't have a frame pointer, calls, or dynamic alloca then we do not need
895 // to adjust the stack pointer (we fit in the Red Zone).
896 if (FrameSize <= 224 && // Fits in red zone.
897 !MFI->hasVarSizedObjects() && // No dynamic alloca.
898 !MFI->hasCalls() && // No calls.
899 (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
// Red-zone case: a zero stack size means no SP adjustment is emitted.
// NOTE(review): an early return appears elided from this excerpt.
901 MFI->setStackSize(0);
905 // Get the maximum call frame size of all the calls.
906 unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
908 // Maximum call frame needs to be at least big enough for linkage and 8 args.
909 unsigned minCallFrameSize =
910 PPCFrameInfo::getMinCallFrameSize(Subtarget.isPPC64(),
911 Subtarget.isMachoABI());
912 maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
914 // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
915 // that allocations will be aligned.
916 if (MFI->hasVarSizedObjects())
917 maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
919 // Update maximum call frame size.
920 MFI->setMaxCallFrameSize(maxCallFrameSize);
922 // Include call frame size in total.
923 FrameSize += maxCallFrameSize;
925 // Make sure the frame is aligned.
926 FrameSize = (FrameSize + AlignMask) & ~AlignMask;
928 // Update frame info.
929 MFI->setStackSize(FrameSize);
/// processFunctionBeforeCalleeSavedScan - Before the callee-saved scan runs,
/// decide which extra stack slots this function needs: the LR save state,
/// the frame-pointer (R31/X31) save slot, tail-call linkage-area space, and
/// a register-scavenger spill slot.
933 PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
934 RegScavenger *RS) const {
935 // Save and clear the LR state.
936 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
937 unsigned LR = getRARegister();
938 FI->setMustSaveLR(MustSaveLR(MF, LR));
// Mark LR unused so the callee-saved scan doesn't also try to spill it;
// the prologue/epilogue code handles LR explicitly.
939 MF.getRegInfo().setPhysRegUnused(LR);
941 // Save R31 if necessary
942 int FPSI = FI->getFramePointerSaveIndex();
943 bool IsPPC64 = Subtarget.isPPC64();
944 bool IsELF32_ABI = Subtarget.isELF32_ABI();
945 bool IsMachoABI = Subtarget.isMachoABI();
946 MachineFrameInfo *MFI = MF.getFrameInfo();
948 // If the frame pointer save index hasn't been defined yet.
949 if (!FPSI && (NoFramePointerElim || MFI->hasVarSizedObjects()) &&
951 // Find out what the fix offset of the frame pointer save area.
952 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64,
954 // Allocate the frame index for frame pointer save area.
// Slot is pointer-sized: 8 bytes on ppc64, 4 on ppc32.
955 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
957 FI->setFramePointerSaveIndex(FPSI);
960 // Reserve stack space to move the linkage area to in case of a tail call.
// NOTE(review): TCSPDelta's declaration appears elided from this excerpt;
// a negative delta means the tail call needs extra space below the frame.
962 if (PerformTailCallOpt && (TCSPDelta=FI->getTailCallSPDelta()) < 0) {
963 int AddFPOffsetAmount = IsELF32_ABI ? -4 : 0;
964 MF.getFrameInfo()->CreateFixedObject( -1 * TCSPDelta,
965 AddFPOffsetAmount + TCSPDelta);
967 // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
968 // a large stack, which will require scavenging a register to materialize a
// large frame offset.
970 // FIXME: this doesn't actually check stack size, so is a bit pessimistic
971 // FIXME: doesn't detect whether or not we need to spill vXX, which requires
// a scavenged register too.
974 if (EnableRegisterScavenging) // FIXME (64-bit): Enable.
975 if (needsFP(MF) || spillsCR(MF)) {
976 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
977 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
978 const TargetRegisterClass *RC = IsPPC64 ? G8RC : GPRC;
// Scavenger spill slot is sized and aligned for one GPR of the target width.
979 RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
980 RC->getAlignment()));
/// emitPrologue - Insert prologue code into the entry block: lower any
/// UPDATE_VRSAVE pseudo, save LR, allocate (and optionally over-align) the
/// stack frame, emit DWARF frame-move records, and establish the frame
/// pointer when one is required.
985 PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
986 MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
987 MachineBasicBlock::iterator MBBI = MBB.begin();
988 MachineFrameInfo *MFI = MF.getFrameInfo();
989 MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
// Frame-move records are needed for debug info and for EH unwind tables.
990 bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
991 !MF.getFunction()->doesNotThrow() ||
992 UnwindTablesMandatory;
994 // Prepare for frame info.
995 unsigned FrameLabelId = 0;
997 // Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// lower it to the real ORI/ORIS mask-update sequence (or delete it).
999 for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
1000 if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
1001 HandleVRSaveUpdate(MBBI, TII);
1006 // Move MBBI back to the beginning of the function.
1009 // Work out frame sizes.
1010 determineFrameLayout(MF);
1011 unsigned FrameSize = MFI->getStackSize();
1013 int NegFrameSize = -FrameSize;
1015 // Get processor type.
1016 bool IsPPC64 = Subtarget.isPPC64();
1017 // Get operating system
1018 bool IsMachoABI = Subtarget.isMachoABI();
1019 // Check if the link register (LR) must be saved.
1020 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1021 bool MustSaveLR = FI->mustSaveLR();
1022 // Do we have a frame pointer for this function?
1023 bool HasFP = hasFP(MF) && FrameSize;
1025 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
1026 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
// 64-bit path: move LR into X0 and store it (and FP if used) into the
// linkage area. NOTE(review): the guarding if/else lines are elided here.
1030 BuildMI(MBB, MBBI, TII.get(PPC::MFLR8), PPC::X0);
1033 BuildMI(MBB, MBBI, TII.get(PPC::STD))
1039 BuildMI(MBB, MBBI, TII.get(PPC::STD))
// Offset divided by 4 for the 64-bit doubleword store (DS-form scaling).
1041 .addImm(LROffset / 4)
// 32-bit counterpart: MFLR into R0 plus STW saves.
1045 BuildMI(MBB, MBBI, TII.get(PPC::MFLR), PPC::R0);
1048 BuildMI(MBB, MBBI, TII.get(PPC::STW))
1054 BuildMI(MBB, MBBI, TII.get(PPC::STW))
1060 // Skip if a leaf routine.
1061 if (!FrameSize) return;
1063 // Get stack alignments.
1064 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1065 unsigned MaxAlign = MFI->getMaxAlignment();
1067 if (needsFrameMoves) {
1068 // Mark effective beginning of when frame pointer becomes valid.
1069 FrameLabelId = MMI->NextLabelID();
1070 BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(FrameLabelId);
1073 // Adjust stack pointer: r1 += NegFrameSize.
1074 // If there is a preferred stack alignment, align R1 now
// 32-bit stack allocation; the 64-bit variant using the same three cases
// follows further below.
1077 if (ALIGN_STACK && MaxAlign > TargetAlign) {
1078 assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
1079 assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
// Over-aligned frame: compute in R0 an adjusted negative size such that
// R1+R0 is MaxAlign-aligned, then allocate via store-with-update (STWUX).
1081 BuildMI(MBB, MBBI, TII.get(PPC::RLWINM), PPC::R0)
1084 .addImm(32 - Log2_32(MaxAlign))
1086 BuildMI(MBB, MBBI, TII.get(PPC::SUBFIC) ,PPC::R0)
1087 .addReg(PPC::R0, false, false, true)
1088 .addImm(NegFrameSize);
1089 BuildMI(MBB, MBBI, TII.get(PPC::STWUX))
1093 } else if (isInt16(NegFrameSize)) {
// Small frame: one store-with-update with an immediate displacement.
1094 BuildMI(MBB, MBBI, TII.get(PPC::STWU), PPC::R1)
1096 .addImm(NegFrameSize)
// Large frame: build NegFrameSize in R0 with LIS/ORI, then STWUX.
1099 BuildMI(MBB, MBBI, TII.get(PPC::LIS), PPC::R0)
1100 .addImm(NegFrameSize >> 16);
1101 BuildMI(MBB, MBBI, TII.get(PPC::ORI), PPC::R0)
1102 .addReg(PPC::R0, false, false, true)
1103 .addImm(NegFrameSize & 0xFFFF);
1104 BuildMI(MBB, MBBI, TII.get(PPC::STWUX))
// 64-bit stack allocation: same three cases with 64-bit opcodes.
1110 if (ALIGN_STACK && MaxAlign > TargetAlign) {
1111 assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
1112 assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
1114 BuildMI(MBB, MBBI, TII.get(PPC::RLDICL), PPC::X0)
1117 .addImm(64 - Log2_32(MaxAlign));
1118 BuildMI(MBB, MBBI, TII.get(PPC::SUBFIC8), PPC::X0)
1120 .addImm(NegFrameSize);
1121 BuildMI(MBB, MBBI, TII.get(PPC::STDUX))
1125 } else if (isInt16(NegFrameSize)) {
1126 BuildMI(MBB, MBBI, TII.get(PPC::STDU), PPC::X1)
// Offset divided by 4 for the DS-form doubleword store.
1128 .addImm(NegFrameSize / 4)
1131 BuildMI(MBB, MBBI, TII.get(PPC::LIS8), PPC::X0)
1132 .addImm(NegFrameSize >> 16);
1133 BuildMI(MBB, MBBI, TII.get(PPC::ORI8), PPC::X0)
1134 .addReg(PPC::X0, false, false, true)
1135 .addImm(NegFrameSize & 0xFFFF);
1136 BuildMI(MBB, MBBI, TII.get(PPC::STDUX))
// Record DWARF frame moves describing the new SP, FP, and CSR locations.
1143 if (needsFrameMoves) {
1144 std::vector<MachineMove> &Moves = MMI->getFrameMoves();
1147 // Show update of SP.
1148 MachineLocation SPDst(MachineLocation::VirtualFP);
1149 MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
1150 Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
1152 MachineLocation SP(IsPPC64 ? PPC::X31 : PPC::R31);
1153 Moves.push_back(MachineMove(FrameLabelId, SP, SP));
1157 MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
1158 MachineLocation FPSrc(IsPPC64 ? PPC::X31 : PPC::R31);
1159 Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
1162 // Add callee saved registers to move list.
1163 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
1164 for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
1165 int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
1166 unsigned Reg = CSI[I].getReg();
// LR gets its own record below; RM is skipped as well.
1167 if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
1168 MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
1169 MachineLocation CSSrc(Reg);
1170 Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
// Record where the link register was saved.
1173 MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
1174 MachineLocation LRSrc(IsPPC64 ? PPC::LR8 : PPC::LR);
1175 Moves.push_back(MachineMove(FrameLabelId, LRDst, LRSrc));
1177 // Mark effective beginning of when frame pointer is ready.
1178 unsigned ReadyLabelId = MMI->NextLabelID();
1179 BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(ReadyLabelId);
// Frame-pointer source is R31/X31 when we keep one, else the SP itself.
1181 MachineLocation FPDst(HasFP ? (IsPPC64 ? PPC::X31 : PPC::R31) :
1182 (IsPPC64 ? PPC::X1 : PPC::R1));
1183 MachineLocation FPSrc(MachineLocation::VirtualFP);
1184 Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
1187 // If there is a frame pointer, copy R1 into R31
// (OR rd,rs,rs is the canonical register-to-register copy on PPC).
1190 BuildMI(MBB, MBBI, TII.get(PPC::OR), PPC::R31)
1194 BuildMI(MBB, MBBI, TII.get(PPC::OR8), PPC::X31)
/// emitEpilogue - Insert epilogue code before the terminator: deallocate the
/// stack frame, restore LR and the frame pointer, pop the callee-popped
/// parameter area for fastcc returns, and rewrite TCRETURN pseudos into real
/// tail-branch instructions.
1201 void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
1202 MachineBasicBlock &MBB) const {
1203 MachineBasicBlock::iterator MBBI = prior(MBB.end());
1204 unsigned RetOpcode = MBBI->getOpcode();
1206 assert( (RetOpcode == PPC::BLR ||
1207 RetOpcode == PPC::TCRETURNri ||
1208 RetOpcode == PPC::TCRETURNdi ||
1209 RetOpcode == PPC::TCRETURNai ||
1210 RetOpcode == PPC::TCRETURNri8 ||
1211 RetOpcode == PPC::TCRETURNdi8 ||
1212 RetOpcode == PPC::TCRETURNai8) &&
1213 "Can only insert epilog into returning blocks");
1215 // Get alignment info so we know how to restore r1
1216 const MachineFrameInfo *MFI = MF.getFrameInfo();
1217 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1218 unsigned MaxAlign = MFI->getMaxAlignment();
1220 // Get the number of bytes allocated from the FrameInfo.
1221 int FrameSize = MFI->getStackSize();
1223 // Get processor type.
1224 bool IsPPC64 = Subtarget.isPPC64();
1225 // Get operating system
1226 bool IsMachoABI = Subtarget.isMachoABI();
1227 // Check if the link register (LR) has been saved.
1228 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1229 bool MustSaveLR = FI->mustSaveLR();
1230 // Do we have a frame pointer for this function?
1231 bool HasFP = hasFP(MF) && FrameSize;
1233 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
1234 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
1236 bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
1237 RetOpcode == PPC::TCRETURNdi ||
1238 RetOpcode == PPC::TCRETURNai ||
1239 RetOpcode == PPC::TCRETURNri8 ||
1240 RetOpcode == PPC::TCRETURNdi8 ||
1241 RetOpcode == PPC::TCRETURNai8;
// Tail-call returns carry a stack-adjustment immediate (operand 1); fold it
// into FrameSize so a single SP adjustment below covers both amounts.
1244 int MaxTCRetDelta = FI->getTailCallSPDelta();
1245 MachineOperand &StackAdjust = MBBI->getOperand(1);
1246 assert(StackAdjust.isImm() && "Expecting immediate value.");
1247 // Adjust stack pointer.
1248 int StackAdj = StackAdjust.getImm();
1249 int Delta = StackAdj - MaxTCRetDelta;
1250 assert((Delta >= 0) && "Delta must be positive");
1251 if (MaxTCRetDelta>0)
1252 FrameSize += (StackAdj +Delta);
1254 FrameSize += StackAdj;
1258 // The loaded (or persistent) stack pointer value is offset by the 'stwu'
1259 // on entry to the function. Add this offset back now.
1261 // If this function contained a fastcc call and PerformTailCallOpt is
1262 // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
1263 // call which invalidates the stack pointer value in SP(0). So we use the
1264 // value of R31 in this case.
// 32-bit frame deallocation (the 64-bit variant follows below).
1265 if (FI->hasFastCall() && isInt16(FrameSize)) {
1266 assert(hasFP(MF) && "Expecting a valid the frame pointer.");
1267 BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
1268 .addReg(PPC::R31).addImm(FrameSize);
1269 } else if(FI->hasFastCall()) {
// FrameSize doesn't fit in 16 bits: materialize it in R0 via LIS/ORI.
1270 BuildMI(MBB, MBBI, TII.get(PPC::LIS), PPC::R0)
1271 .addImm(FrameSize >> 16);
1272 BuildMI(MBB, MBBI, TII.get(PPC::ORI), PPC::R0)
1273 .addReg(PPC::R0, false, false, true)
1274 .addImm(FrameSize & 0xFFFF);
1275 BuildMI(MBB, MBBI, TII.get(PPC::ADD4))
1279 } else if (isInt16(FrameSize) &&
1280 (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
1281 !MFI->hasVarSizedObjects()) {
// Fixed, ordinarily-aligned frame: just add the size back to R1.
1282 BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
1283 .addReg(PPC::R1).addImm(FrameSize);
// Otherwise reload the caller's SP from the back-chain word at SP(0).
1285 BuildMI(MBB, MBBI, TII.get(PPC::LWZ),PPC::R1).addImm(0).addReg(PPC::R1);
// 64-bit frame deallocation: identical cases with 64-bit opcodes.
1288 if (FI->hasFastCall() && isInt16(FrameSize)) {
1289 assert(hasFP(MF) && "Expecting a valid the frame pointer.");
1290 BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
1291 .addReg(PPC::X31).addImm(FrameSize);
1292 } else if(FI->hasFastCall()) {
1293 BuildMI(MBB, MBBI, TII.get(PPC::LIS8), PPC::X0)
1294 .addImm(FrameSize >> 16);
1295 BuildMI(MBB, MBBI, TII.get(PPC::ORI8), PPC::X0)
1296 .addReg(PPC::X0, false, false, true)
1297 .addImm(FrameSize & 0xFFFF);
1298 BuildMI(MBB, MBBI, TII.get(PPC::ADD8))
1302 } else if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
1303 !MFI->hasVarSizedObjects()) {
1304 BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
1305 .addReg(PPC::X1).addImm(FrameSize);
1307 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X1).addImm(0).addReg(PPC::X1);
// 64-bit restores: reload LR (and FP if saved) from the linkage area.
// Offsets are divided by 4 for the DS-form doubleword loads.
1314 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X0)
1315 .addImm(LROffset/4).addReg(PPC::X1);
1318 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X31)
1319 .addImm(FPOffset/4).addReg(PPC::X1);
1322 BuildMI(MBB, MBBI, TII.get(PPC::MTLR8)).addReg(PPC::X0);
// 32-bit restores of LR (via R0) and the frame pointer.
1325 BuildMI(MBB, MBBI, TII.get(PPC::LWZ), PPC::R0)
1326 .addImm(LROffset).addReg(PPC::R1);
1329 BuildMI(MBB, MBBI, TII.get(PPC::LWZ), PPC::R31)
1330 .addImm(FPOffset).addReg(PPC::R1);
1333 BuildMI(MBB, MBBI, TII.get(PPC::MTLR)).addReg(PPC::R0);
1336 // Callee pop calling convention. Pop parameter/linkage area. Used for tail
1337 // call optimization
1338 if (PerformTailCallOpt && RetOpcode == PPC::BLR &&
1339 MF.getFunction()->getCallingConv() == CallingConv::Fast) {
1340 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1341 unsigned CallerAllocatedAmt = FI->getMinReservedArea();
1342 unsigned StackReg = IsPPC64 ? PPC::X1 : PPC::R1;
1343 unsigned FPReg = IsPPC64 ? PPC::X31 : PPC::R31;
1344 unsigned TmpReg = IsPPC64 ? PPC::X0 : PPC::R0;
1345 unsigned ADDIInstr = IsPPC64 ? PPC::ADDI8 : PPC::ADDI;
1346 unsigned ADDInstr = IsPPC64 ? PPC::ADD8 : PPC::ADD4;
1347 unsigned LISInstr = IsPPC64 ? PPC::LIS8 : PPC::LIS;
1348 unsigned ORIInstr = IsPPC64 ? PPC::ORI8 : PPC::ORI;
1350 if (CallerAllocatedAmt && isInt16(CallerAllocatedAmt)) {
1351 BuildMI(MBB, MBBI, TII.get(ADDIInstr), StackReg)
1352 .addReg(StackReg).addImm(CallerAllocatedAmt);
// Amount doesn't fit in 16 bits: build it in TmpReg, then ADD to SP.
1354 BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
1355 .addImm(CallerAllocatedAmt >> 16);
1356 BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
1357 .addReg(TmpReg, false, false, true)
1358 .addImm(CallerAllocatedAmt & 0xFFFF);
1359 BuildMI(MBB, MBBI, TII.get(ADDInstr))
// Rewrite each TCRETURN pseudo into its real tail-branch instruction.
1364 } else if (RetOpcode == PPC::TCRETURNdi) {
1365 MBBI = prior(MBB.end());
1366 MachineOperand &JumpTarget = MBBI->getOperand(0);
1367 BuildMI(MBB, MBBI, TII.get(PPC::TAILB)).
1368 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1369 } else if (RetOpcode == PPC::TCRETURNri) {
1370 MBBI = prior(MBB.end());
1371 MachineOperand &JumpTarget = MBBI->getOperand(0);
1372 assert(JumpTarget.isReg() && "Expecting register operand.");
1373 BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR));
1374 } else if (RetOpcode == PPC::TCRETURNai) {
1375 MBBI = prior(MBB.end());
1376 MachineOperand &JumpTarget = MBBI->getOperand(0);
1377 BuildMI(MBB, MBBI, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
1378 } else if (RetOpcode == PPC::TCRETURNdi8) {
1379 MBBI = prior(MBB.end());
1380 MachineOperand &JumpTarget = MBBI->getOperand(0);
1381 BuildMI(MBB, MBBI, TII.get(PPC::TAILB8)).
1382 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1383 } else if (RetOpcode == PPC::TCRETURNri8) {
1384 MBBI = prior(MBB.end());
1385 MachineOperand &JumpTarget = MBBI->getOperand(0);
1386 assert(JumpTarget.isReg() && "Expecting register operand.");
1387 BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR8));
1388 } else if (RetOpcode == PPC::TCRETURNai8) {
1389 MBBI = prior(MBB.end());
1390 MachineOperand &JumpTarget = MBBI->getOperand(0);
1391 BuildMI(MBB, MBBI, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
1395 unsigned PPCRegisterInfo::getRARegister() const {
1396 return !Subtarget.isPPC64() ? PPC::LR : PPC::LR8;
1399 unsigned PPCRegisterInfo::getFrameRegister(MachineFunction &MF) const {
1400 if (!Subtarget.isPPC64())
1401 return hasFP(MF) ? PPC::R31 : PPC::R1;
1403 return hasFP(MF) ? PPC::X31 : PPC::X1;
/// getInitialFrameState - Describe the frame state at function entry: the
/// virtual frame pointer corresponds to R1 with a zero offset.
1406 void PPCRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
1408 // Initial state of the frame pointer is R1.
1409 MachineLocation Dst(MachineLocation::VirtualFP);
1410 MachineLocation Src(PPC::R1, 0);
// Label id 0 marks the function-entry state.
1411 Moves.push_back(MachineMove(0, Dst, Src));
1414 unsigned PPCRegisterInfo::getEHExceptionRegister() const {
1415 return !Subtarget.isPPC64() ? PPC::R3 : PPC::X3;
1418 unsigned PPCRegisterInfo::getEHHandlerRegister() const {
1419 return !Subtarget.isPPC64() ? PPC::R4 : PPC::X4;
1422 int PPCRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
1423 // FIXME: Most probably dwarf numbers differs for Linux and Darwin
1424 return PPCGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
1427 #include "PPCGenRegisterInfo.inc"