1 //===- PPCRegisterInfo.cpp - PowerPC Register Information -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the PowerPC implementation of the TargetRegisterInfo class.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "reginfo"
17 #include "PPCInstrBuilder.h"
18 #include "PPCMachineFunctionInfo.h"
19 #include "PPCRegisterInfo.h"
20 #include "PPCFrameInfo.h"
21 #include "PPCSubtarget.h"
22 #include "llvm/CallingConv.h"
23 #include "llvm/Constants.h"
24 #include "llvm/Function.h"
25 #include "llvm/Type.h"
26 #include "llvm/CodeGen/ValueTypes.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineLocation.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/RegisterScavenging.h"
34 #include "llvm/Target/TargetFrameInfo.h"
35 #include "llvm/Target/TargetInstrInfo.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/ADT/BitVector.h"
42 #include "llvm/ADT/STLExtras.h"
46 // FIXME This disables some code that aligns the stack to a boundary
47 // bigger than the default (16 bytes on Darwin) when there is a stack local
48 // of greater alignment. This does not currently work, because the delta
49 // between old and new stack pointers is added to offsets that reference
50 // incoming parameters after the prolog is generated, and the code that
51 // does that doesn't handle a variable delta. You don't want to do that
52 // anyway; a better approach is to reserve another register that points
53 // to the incoming stack pointer, and reference parameters relative to that.
56 // FIXME (64-bit): Eventually enable by default.
57 cl::opt<bool> EnablePPC32RS("enable-ppc32-regscavenger",
59 cl::desc("Enable PPC32 register scavenger"),
61 cl::opt<bool> EnablePPC64RS("enable-ppc64-regscavenger",
63 cl::desc("Enable PPC64 register scavenger"),
65 #define EnableRegisterScavenging \
66 ((EnablePPC32RS && !Subtarget.isPPC64()) || \
67 (EnablePPC64RS && Subtarget.isPPC64()))
69 // FIXME (64-bit): Should be inlined.
71 PPCRegisterInfo::requiresRegisterScavenging(const MachineFunction &) const {
72 return EnableRegisterScavenging;
75 /// getRegisterNumbering - Given the enum value for some register, e.g.
76 /// PPC::F14, return the number that it corresponds to (e.g. 14).
77 unsigned PPCRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
81 case R0 : case X0 : case F0 : case V0 : case CR0: case CR0LT: return 0;
82 case R1 : case X1 : case F1 : case V1 : case CR1: case CR0GT: return 1;
83 case R2 : case X2 : case F2 : case V2 : case CR2: case CR0EQ: return 2;
84 case R3 : case X3 : case F3 : case V3 : case CR3: case CR0UN: return 3;
85 case R4 : case X4 : case F4 : case V4 : case CR4: case CR1LT: return 4;
86 case R5 : case X5 : case F5 : case V5 : case CR5: case CR1GT: return 5;
87 case R6 : case X6 : case F6 : case V6 : case CR6: case CR1EQ: return 6;
88 case R7 : case X7 : case F7 : case V7 : case CR7: case CR1UN: return 7;
89 case R8 : case X8 : case F8 : case V8 : case CR2LT: return 8;
90 case R9 : case X9 : case F9 : case V9 : case CR2GT: return 9;
91 case R10: case X10: case F10: case V10: case CR2EQ: return 10;
92 case R11: case X11: case F11: case V11: case CR2UN: return 11;
93 case R12: case X12: case F12: case V12: case CR3LT: return 12;
94 case R13: case X13: case F13: case V13: case CR3GT: return 13;
95 case R14: case X14: case F14: case V14: case CR3EQ: return 14;
96 case R15: case X15: case F15: case V15: case CR3UN: return 15;
97 case R16: case X16: case F16: case V16: case CR4LT: return 16;
98 case R17: case X17: case F17: case V17: case CR4GT: return 17;
99 case R18: case X18: case F18: case V18: case CR4EQ: return 18;
100 case R19: case X19: case F19: case V19: case CR4UN: return 19;
101 case R20: case X20: case F20: case V20: case CR5LT: return 20;
102 case R21: case X21: case F21: case V21: case CR5GT: return 21;
103 case R22: case X22: case F22: case V22: case CR5EQ: return 22;
104 case R23: case X23: case F23: case V23: case CR5UN: return 23;
105 case R24: case X24: case F24: case V24: case CR6LT: return 24;
106 case R25: case X25: case F25: case V25: case CR6GT: return 25;
107 case R26: case X26: case F26: case V26: case CR6EQ: return 26;
108 case R27: case X27: case F27: case V27: case CR6UN: return 27;
109 case R28: case X28: case F28: case V28: case CR7LT: return 28;
110 case R29: case X29: case F29: case V29: case CR7GT: return 29;
111 case R30: case X30: case F30: case V30: case CR7EQ: return 30;
112 case R31: case X31: case F31: case V31: case CR7UN: return 31;
114 cerr << "Unhandled reg in PPCRegisterInfo::getRegisterNumbering!\n";
119 PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
120 const TargetInstrInfo &tii)
121 : PPCGenRegisterInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
122 Subtarget(ST), TII(tii) {
123 ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
124 ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
125 ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
126 ImmToIdxMap[PPC::LWZ] = PPC::LWZX; ImmToIdxMap[PPC::LWA] = PPC::LWAX;
127 ImmToIdxMap[PPC::LFS] = PPC::LFSX; ImmToIdxMap[PPC::LFD] = PPC::LFDX;
128 ImmToIdxMap[PPC::STH] = PPC::STHX; ImmToIdxMap[PPC::STW] = PPC::STWX;
129 ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
130 ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
133 ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
134 ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
135 ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
136 ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
137 ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; ImmToIdxMap[PPC::STD_32] = PPC::STDX_32;
// getCalleeSavedRegs - Return the array of callee-saved registers for this
// function's ABI: Darwin/Macho 32-bit, Darwin/Macho 64-bit, or 32-bit ELF.
// MF is unused; the choice depends only on the subtarget.  Presumably each
// array is null-terminated like the matching RegClasses arrays below — the
// terminator rows are among the lines missing from this listing; confirm.
// NOTE(review): this listing carries stale embedded line numbers and skips
// several rows (the embedded numbering jumps, e.g. F30/F31 and LR entries
// appear to be missing) — verify against the original file before editing.
141 PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
142 // 32-bit Darwin calling convention.
143 static const unsigned Macho32_CalleeSavedRegs[] = {
144 PPC::R13, PPC::R14, PPC::R15,
145 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
146 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
147 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
148 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
// Floating-point callee-saved registers.
150 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
151 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
152 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
153 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
// Condition-register fields and vector registers.
156 PPC::CR2, PPC::CR3, PPC::CR4,
157 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
158 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
159 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
// Individual CR bits of the callee-saved CR fields.
161 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
162 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
163 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
// 32-bit ELF calling convention (note: also treats F10-F13 as callee-saved).
168 static const unsigned ELF32_CalleeSavedRegs[] = {
169 PPC::R13, PPC::R14, PPC::R15,
170 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
171 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
172 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
173 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
176 PPC::F10, PPC::F11, PPC::F12, PPC::F13,
177 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
178 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
179 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
180 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
183 PPC::CR2, PPC::CR3, PPC::CR4,
184 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
185 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
186 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
188 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
189 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
190 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
194 // 64-bit Darwin calling convention.
195 static const unsigned Macho64_CalleeSavedRegs[] = {
196 PPC::X16, PPC::X17, PPC::X18, PPC::X19,
197 PPC::X16, PPC::X17, PPC::X18, PPC::X19,
198 PPC::X20, PPC::X21, PPC::X22, PPC::X23,
199 PPC::X24, PPC::X25, PPC::X26, PPC::X27,
200 PPC::X28, PPC::X29, PPC::X30, PPC::X31,
202 PPC::F14, PPC::F15, PPC::F16, PPC::F17,
203 PPC::F18, PPC::F19, PPC::F20, PPC::F21,
204 PPC::F22, PPC::F23, PPC::F24, PPC::F25,
205 PPC::F26, PPC::F27, PPC::F28, PPC::F29,
208 PPC::CR2, PPC::CR3, PPC::CR4,
209 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
210 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
211 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
213 PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
214 PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
215 PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
// Select the array by ABI, then by pointer width.
220 if (Subtarget.isMachoABI())
221 return Subtarget.isPPC64() ? Macho64_CalleeSavedRegs :
222 Macho32_CalleeSavedRegs;
// Only the 32-bit ELF convention is implemented here (no 64-bit ELF array).
225 return ELF32_CalleeSavedRegs;
// getCalleeSavedRegClasses - Return the register classes corresponding, entry
// for entry, to the arrays returned by getCalleeSavedRegs above; each array
// ends with a null terminator.  MF is unused; the choice depends only on the
// subtarget (Macho 32/64-bit vs. 32-bit ELF).
// NOTE(review): this listing carries stale embedded line numbers and skips
// some lines (the embedded numbering jumps); verify against the original
// file before editing the arrays.
228 const TargetRegisterClass* const*
229 PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
230 // 32-bit Macho calling convention.
231 static const TargetRegisterClass * const Macho32_CalleeSavedRegClasses[] = {
232 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
233 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
234 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
235 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
236 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
238 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
239 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
240 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
241 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
242 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
244 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
246 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
247 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
248 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
250 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
251 &PPC::CRBITRCRegClass,
252 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
253 &PPC::CRBITRCRegClass,
254 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
255 &PPC::CRBITRCRegClass,
257 &PPC::GPRCRegClass, 0
260 static const TargetRegisterClass * const ELF32_CalleeSavedRegClasses[] = {
261 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
262 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
263 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
264 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
265 &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
268 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
269 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
270 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
271 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
272 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
273 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
275 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
277 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
278 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
279 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
281 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
282 &PPC::CRBITRCRegClass,
283 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
284 &PPC::CRBITRCRegClass,
285 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
286 &PPC::CRBITRCRegClass,
288 &PPC::GPRCRegClass, 0
291 // 64-bit Macho calling convention.
292 static const TargetRegisterClass * const Macho64_CalleeSavedRegClasses[] = {
293 &PPC::G8RCRegClass,&PPC::G8RCRegClass,
294 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
295 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
296 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
297 &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
299 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
300 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
301 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
302 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
303 &PPC::F8RCRegClass,&PPC::F8RCRegClass,
305 &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
307 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
308 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
309 &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
311 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
312 &PPC::CRBITRCRegClass,
313 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
314 &PPC::CRBITRCRegClass,
315 &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
316 &PPC::CRBITRCRegClass,
318 &PPC::G8RCRegClass, 0
// Select by ABI, then by pointer width; only 32-bit ELF is provided.
321 if (Subtarget.isMachoABI())
322 return Subtarget.isPPC64() ? Macho64_CalleeSavedRegClasses :
323 Macho32_CalleeSavedRegClasses;
326 return ELF32_CalleeSavedRegClasses;
329 // needsFP - Return true if the specified function should have a dedicated frame
330 // pointer register. This is true if the function has variable sized allocas or
331 // if frame pointer elimination is disabled.
333 static bool needsFP(const MachineFunction &MF) {
334 const MachineFrameInfo *MFI = MF.getFrameInfo();
335 return NoFramePointerElim || MFI->hasVarSizedObjects() ||
336 (PerformTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
339 static bool spillsCR(const MachineFunction &MF) {
340 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
341 return FuncInfo->isCRSpilled();
344 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
345 BitVector Reserved(getNumRegs());
346 Reserved.set(PPC::R0);
347 Reserved.set(PPC::R1);
348 Reserved.set(PPC::LR);
349 Reserved.set(PPC::LR8);
351 // In Linux, r2 is reserved for the OS.
352 if (!Subtarget.isDarwin())
353 Reserved.set(PPC::R2);
355 // On PPC64, r13 is the thread pointer. Never allocate this register. Note
356 // that this is over conservative, as it also prevents allocation of R31 when
357 // the FP is not needed.
358 if (Subtarget.isPPC64()) {
359 Reserved.set(PPC::R13);
360 Reserved.set(PPC::R31);
362 if (!EnableRegisterScavenging)
363 Reserved.set(PPC::R0); // FIXME (64-bit): Remove
365 Reserved.set(PPC::X0);
366 Reserved.set(PPC::X1);
367 Reserved.set(PPC::X13);
368 Reserved.set(PPC::X31);
372 Reserved.set(PPC::R31);
377 //===----------------------------------------------------------------------===//
378 // Stack Frame Processing methods
379 //===----------------------------------------------------------------------===//
381 // hasFP - Return true if the specified function actually has a dedicated frame
382 // pointer register. This is true if the function needs a frame pointer and has
383 // a non-zero stack size.
384 bool PPCRegisterInfo::hasFP(const MachineFunction &MF) const {
385 const MachineFrameInfo *MFI = MF.getFrameInfo();
386 return MFI->getStackSize() && needsFP(MF);
389 /// MustSaveLR - Return true if this function requires that we save the LR
390 /// register onto the stack in the prolog and restore it in the epilog of the
392 static bool MustSaveLR(const MachineFunction &MF) {
393 const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
395 // We need an save/restore of LR if there is any use/def of LR explicitly, or
396 // if there is some use of the LR stack slot (e.g. for builtin_return_address.
397 return MFI->usesLR() || MFI->isLRStoreRequired() ||
398 // FIXME: Anything that has a call should clobber the LR register,
399 // isn't this redundant??
400 MF.getFrameInfo()->hasCalls();
// eliminateCallFramePseudoInstr - Expand the ADJCALLSTACKDOWN/ADJCALLSTACKUP
// pseudo instructions.  When tail-call optimization is enabled and the callee
// popped bytes on return (operand 1 of ADJCALLSTACKUP), the stack pointer is
// re-adjusted by that amount: with a single addi when the amount fits in 16
// bits, otherwise with a lis/ori pair into a temp register followed by an add.
// NOTE(review): this listing has stale embedded line numbers and is missing
// lines (the tail of the addi chain, the else brace, and the final erase of
// the pseudo); verify against the original file before editing.
405 void PPCRegisterInfo::
406 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
407 MachineBasicBlock::iterator I) const {
408 if (PerformTailCallOpt && I->getOpcode() == PPC::ADJCALLSTACKUP) {
409 // Add (actually subtract) back the amount the callee popped on return.
410 if (int CalleeAmt = I->getOperand(1).getImm()) {
411 bool is64Bit = Subtarget.isPPC64();
// Pick 32- or 64-bit registers and opcodes.
413 unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
414 unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
415 unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
416 unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
417 unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
418 unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
// Small amounts fit directly in an addi immediate.
420 if (isInt16(CalleeAmt)) {
421 BuildMI(MBB, I, TII.get(ADDIInstr), StackReg).addReg(StackReg).
// Otherwise materialize the 32-bit amount with lis/ori, then add.
424 MachineBasicBlock::iterator MBBI = I;
425 BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
426 .addImm(CalleeAmt >> 16);
427 BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
428 .addReg(TmpReg, false, false, true)
429 .addImm(CalleeAmt & 0xFFFF);
430 BuildMI(MBB, MBBI, TII.get(ADDInstr))
437 // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
441 /// findScratchRegister - Find a 'free' PPC register. Try for a call-clobbered
442 /// register first and then a spilled callee-saved register if that fails.
444 unsigned findScratchRegister(MachineBasicBlock::iterator II, RegScavenger *RS,
445 const TargetRegisterClass *RC, int SPAdj) {
446 assert(RS && "Register scavenging must be on");
447 unsigned Reg = RS->FindUnusedReg(RC, true);
448 // FIXME: move ARM callee-saved reg scan to target independent code, then
449 // search for already spilled CS register here.
451 Reg = RS->scavengeRegister(RC, II, SPAdj);
// NOTE(review): this listing has stale embedded line numbers and is missing
// lines (operands of several BuildMI chains, closing braces, and the final
// erase of the DYNALLOC pseudo); verify against the original before editing.
455 /// lowerDynamicAlloc - Generate the code for allocating an object in the
456 /// current frame. The sequence of code will be in the general form
458 /// addi R0, SP, #frameSize ; get the address of the previous frame
459 /// stwxu R0, SP, Rnegsize ; add and update the SP with the negated size
460 /// addi Rnew, SP, #maxCalFrameSize ; get the top of the allocation
462 void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
463 int SPAdj, RegScavenger *RS) const {
464 // Get the instruction.
465 MachineInstr &MI = *II;
466 // Get the instruction's basic block.
467 MachineBasicBlock &MBB = *MI.getParent();
468 // Get the basic block's function.
469 MachineFunction &MF = *MBB.getParent();
470 // Get the frame info.
471 MachineFrameInfo *MFI = MF.getFrameInfo();
472 // Determine whether 64-bit pointers are used.
473 bool LP64 = Subtarget.isPPC64();
475 // Get the maximum call stack size.
476 unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
477 // Get the total frame size.
478 unsigned FrameSize = MFI->getStackSize();
480 // Get stack alignments.
481 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
482 unsigned MaxAlign = MFI->getMaxAlignment();
483 assert(MaxAlign <= TargetAlign &&
484 "Dynamic alloca with large aligns not supported");
486 // Determine the previous frame's address. If FrameSize can't be
487 // represented as 16 bits or we need special alignment, then we load the
488 // previous frame's address from 0(SP). Why not do an addis of the hi?
489 // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
490 // Constructing the constant and adding would take 3 instructions.
491 // Fortunately, a frame greater than 32K is rare.
492 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
493 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
494 const TargetRegisterClass *RC = LP64 ? G8RC : GPRC;
496 // FIXME (64-bit): Use "findScratchRegister"
// Scavenge a scratch register when scavenging is enabled; otherwise the
// fallback (elided in this listing) presumably uses R0 directly — confirm.
498 if (EnableRegisterScavenging)
499 Reg = findScratchRegister(II, RS, RC, SPAdj);
// Small, normally-aligned frames: compute previous SP with a single addi.
503 if (MaxAlign < TargetAlign && isInt16(FrameSize)) {
504 BuildMI(MBB, II, TII.get(PPC::ADDI), Reg)
// Large or specially-aligned frames: reload the previous SP from 0(SP).
508 if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
509 BuildMI(MBB, II, TII.get(PPC::LD), Reg)
513 BuildMI(MBB, II, TII.get(PPC::LD), PPC::X0)
517 BuildMI(MBB, II, TII.get(PPC::LWZ), Reg)
522 // Grow the stack and update the stack pointer link, then determine the
523 // address of new allocated space.
// 64-bit path: stdux links the previous SP while bumping the SP.
525 if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
526 BuildMI(MBB, II, TII.get(PPC::STDUX))
527 .addReg(Reg, false, false, true)
529 .addReg(MI.getOperand(1).getReg());
531 BuildMI(MBB, II, TII.get(PPC::STDUX))
532 .addReg(PPC::X0, false, false, true)
534 .addReg(MI.getOperand(1).getReg());
536 if (!MI.getOperand(1).isKill())
537 BuildMI(MBB, II, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
539 .addImm(maxCallFrameSize);
541 // Implicitly kill the register.
542 BuildMI(MBB, II, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
544 .addImm(maxCallFrameSize)
545 .addReg(MI.getOperand(1).getReg(), false, true, true);
// 32-bit path: same pattern with stwux/addi.
547 BuildMI(MBB, II, TII.get(PPC::STWUX))
548 .addReg(Reg, false, false, true)
550 .addReg(MI.getOperand(1).getReg());
552 if (!MI.getOperand(1).isKill())
553 BuildMI(MBB, II, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
555 .addImm(maxCallFrameSize);
557 // Implicitly kill the register.
558 BuildMI(MBB, II, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
560 .addImm(maxCallFrameSize)
561 .addReg(MI.getOperand(1).getReg(), false, true, true);
564 // Discard the DYNALLOC instruction.
// NOTE(review): this listing has stale embedded line numbers and is missing
// lines (the tail of the rlwinm/stw chains and the final erase of the
// SPILL_CR pseudo); verify against the original before editing.
568 /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
569 /// reserving a whole register (R0), we scrounge for one here. This generates
572 /// mfcr rA ; Move the conditional register into GPR rA.
573 /// rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
574 /// stw rA, FI ; Store rA to the frame.
576 void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
577 unsigned FrameIndex, int SPAdj,
578 RegScavenger *RS) const {
579 // Get the instruction.
580 MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset>, <FI>
581 // Get the instruction's basic block.
582 MachineBasicBlock &MBB = *MI.getParent();
// Scavenge a GPR (64-bit class on PPC64) to hold the CR contents.
584 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
585 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
586 const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
587 unsigned Reg = findScratchRegister(II, RS, RC, SPAdj);
589 // We need to store the CR in the low 4-bits of the saved value. First, issue
590 // an MFCR to save all of the CRBits. Add an implicit kill of the CR.
591 if (!MI.getOperand(0).isKill())
592 BuildMI(MBB, II, TII.get(PPC::MFCR), Reg);
594 // Implicitly kill the CR register.
595 BuildMI(MBB, II, TII.get(PPC::MFCR), Reg)
596 .addReg(MI.getOperand(0).getReg(), false, true, true);
598 // If the saved register wasn't CR0, shift the bits left so that they are in
// CR0's slot (each CR field is 4 bits wide, hence the *4 shift amount).
600 unsigned SrcReg = MI.getOperand(0).getReg();
601 if (SrcReg != PPC::CR0)
602 // rlwinm rA, rA, ShiftBits, 0, 31.
603 BuildMI(MBB, II, TII.get(PPC::RLWINM), Reg)
604 .addReg(Reg, false, false, true)
605 .addImm(PPCRegisterInfo::getRegisterNumbering(SrcReg) * 4)
// Store the scratch register to the frame slot.
609 addFrameReference(BuildMI(MBB, II, TII.get(PPC::STW))
610 .addReg(Reg, false, false, MI.getOperand(1).getImm()),
613 // Discard the pseudo instruction.
// eliminateFrameIndex - Replace an abstract frame-index operand with a real
// base register (R31 when the function has a frame pointer, else R1) plus a
// concrete offset.  DYNALLOC/DYNALLOC8 and SPILL_CR pseudos are dispatched to
// lowerDynamicAlloc/lowerCRSpilling.  If the final offset does not fit in a
// 16-bit immediate (or violates "ix"-form alignment), the offset is built in
// a scratch register via lis/ori and the instruction is rewritten to its
// indexed (X-form) opcode using ImmToIdxMap.
// NOTE(review): this listing has stale embedded line numbers and skips lines
// (e.g. the switch that sets isIXAddr, several early returns, and the tail of
// the ori chain); verify against the original before editing.
617 void PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
618 int SPAdj, RegScavenger *RS) const {
619 assert(SPAdj == 0 && "Unexpected");
621 // Get the instruction.
622 MachineInstr &MI = *II;
623 // Get the instruction's basic block.
624 MachineBasicBlock &MBB = *MI.getParent();
625 // Get the basic block's function.
626 MachineFunction &MF = *MBB.getParent();
627 // Get the frame info.
628 MachineFrameInfo *MFI = MF.getFrameInfo();
630 // Find out which operand is the frame index.
631 unsigned FIOperandNo = 0;
632 while (!MI.getOperand(FIOperandNo).isFrameIndex()) {
634 assert(FIOperandNo != MI.getNumOperands() &&
635 "Instr doesn't have FrameIndex operand!");
637 // Take into account whether it's an add or mem instruction
638 unsigned OffsetOperandNo = (FIOperandNo == 2) ? 1 : 2;
639 if (MI.getOpcode() == TargetInstrInfo::INLINEASM)
640 OffsetOperandNo = FIOperandNo-1;
642 // Get the frame index.
643 int FrameIndex = MI.getOperand(FIOperandNo).getIndex();
645 // Get the frame pointer save index. Users of this index are primarily
646 // DYNALLOC instructions.
647 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
648 int FPSI = FI->getFramePointerSaveIndex();
649 // Get the instruction opcode.
650 unsigned OpC = MI.getOpcode();
652 // Special case for dynamic alloca.
653 if (FPSI && FrameIndex == FPSI &&
654 (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
655 lowerDynamicAlloc(II, SPAdj, RS);
659 // Special case for pseudo-op SPILL_CR.
660 if (EnableRegisterScavenging) // FIXME (64-bit): Enable by default.
661 if (OpC == PPC::SPILL_CR) {
662 lowerCRSpilling(II, FrameIndex, SPAdj, RS);
666 // Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
667 MI.getOperand(FIOperandNo).ChangeToRegister(hasFP(MF) ? PPC::R31 : PPC::R1,
670 // Figure out if the offset in the instruction is shifted right two bits. This
671 // is true for instructions like "STD", which the machine implicitly adds two
673 bool isIXAddr = false;
683 // Now add the frame object offset to the offset from r1.
684 int Offset = MFI->getObjectOffset(FrameIndex);
// "ix"-form offsets are stored pre-shifted, so scale before accumulating.
686 Offset += MI.getOperand(OffsetOperandNo).getImm();
688 Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
690 // If we're not using a Frame Pointer that has been set to the value of the
691 // SP before having the stack size subtracted from it, then add the stack size
692 // to Offset to get the correct offset.
693 Offset += MFI->getStackSize();
695 // If we can, encode the offset directly into the instruction. If this is a
696 // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
697 // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
698 // clear can be encoded. This is extremely uncommon, because normally you
699 // only "std" to a stack slot that is at least 4-byte aligned, but it can
700 // happen in invalid code.
701 if (isInt16(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
703 Offset >>= 2; // The actual encoded value has the low two bits zero.
704 MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
708 // The offset doesn't fit into a single register, scavenge one to build the
710 // FIXME: figure out what SPAdj is doing here.
712 // FIXME (64-bit): Use "findScratchRegister".
714 if (EnableRegisterScavenging)
715 SReg = findScratchRegister(II, RS, &PPC::GPRCRegClass, SPAdj);
719 // Insert a set of rA with the full offset value before the ld, st, or add
720 BuildMI(MBB, II, TII.get(PPC::LIS), SReg)
721 .addImm(Offset >> 16);
722 BuildMI(MBB, II, TII.get(PPC::ORI), SReg)
723 .addReg(SReg, false, false, true)
726 // Convert into indexed form of the instruction:
728 // sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
729 // addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
730 unsigned OperandBase;
732 if (OpC != TargetInstrInfo::INLINEASM) {
733 assert(ImmToIdxMap.count(OpC) &&
734 "No indexed form of load or store available!");
735 unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
736 MI.setDesc(TII.get(NewOpcode));
739 OperandBase = OffsetOperandNo;
// Rewrite the base/index operands for the indexed form.
742 unsigned StackReg = MI.getOperand(FIOperandNo).getReg();
743 MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
744 MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false);
747 /// VRRegNo - Map from a numbered VR register to its enum value.
749 static const unsigned short VRRegNo[] = {
750 PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
751 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, PPC::V12, PPC::V13, PPC::V14, PPC::V15,
752 PPC::V16, PPC::V17, PPC::V18, PPC::V19, PPC::V20, PPC::V21, PPC::V22, PPC::V23,
753 PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
// NOTE(review): this listing has stale embedded line numbers and is missing
// lines (loop-body braces, iterator decrements, and early exits); verify
// against the original before editing.
756 /// RemoveVRSaveCode - We have found that this function does not need any code
757 /// to manipulate the VRSAVE register, even though it uses vector registers.
758 /// This can happen when the only registers used are known to be live in or out
759 /// of the function. Remove all of the VRSAVE related code from the function.
760 static void RemoveVRSaveCode(MachineInstr *MI) {
761 MachineBasicBlock *Entry = MI->getParent();
762 MachineFunction *MF = Entry->getParent();
764 // We know that the MTVRSAVE instruction immediately follows MI. Remove it.
765 MachineBasicBlock::iterator MBBI = MI;
767 assert(MBBI != Entry->end() && MBBI->getOpcode() == PPC::MTVRSAVE);
768 MBBI->eraseFromParent();
770 bool RemovedAllMTVRSAVEs = true;
771 // See if we can find and remove the MTVRSAVE instruction from all of the
// epilog blocks (those ending in a return).
773 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
774 // If last instruction is a return instruction, add an epilogue
775 if (!I->empty() && I->back().getDesc().isReturn()) {
776 bool FoundIt = false;
// Scan backwards from the end of the block for the MTVRSAVE.
777 for (MBBI = I->end(); MBBI != I->begin(); ) {
779 if (MBBI->getOpcode() == PPC::MTVRSAVE) {
780 MBBI->eraseFromParent(); // remove it.
785 RemovedAllMTVRSAVEs &= FoundIt;
789 // If we found and removed all MTVRSAVE instructions, remove the read of
// VRSAVE (the MFVRSAVE preceding the UPDATE_VRSAVE) as well.
791 if (RemovedAllMTVRSAVEs) {
793 assert(MBBI != Entry->begin() && "UPDATE_VRSAVE is first instr in block?");
795 assert(MBBI->getOpcode() == PPC::MFVRSAVE && "VRSAVE instrs wandered?");
796 MBBI->eraseFromParent();
799 // Finally, nuke the UPDATE_VRSAVE.
800 MI->eraseFromParent();
// NOTE(review): this listing has stale embedded line numbers and is missing
// lines (an early return after RemoveVRSaveCode, else branches, .addReg
// operands, and closing braces); verify against the original before editing.
803 // HandleVRSaveUpdate - MI is the UPDATE_VRSAVE instruction introduced by the
804 // instruction selector. Based on the vector registers that have been used,
805 // transform this into the appropriate ORI instruction.
806 static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) {
807 MachineFunction *MF = MI->getParent()->getParent();
// Build a mask with bit (31-i) set for each physically-used vector reg Vi.
809 unsigned UsedRegMask = 0;
810 for (unsigned i = 0; i != 32; ++i)
811 if (MF->getRegInfo().isPhysRegUsed(VRRegNo[i]))
812 UsedRegMask |= 1 << (31-i);
814 // Live in and live out values already must be in the mask, so don't bother
// marking them again here.
816 for (MachineRegisterInfo::livein_iterator
817 I = MF->getRegInfo().livein_begin(),
818 E = MF->getRegInfo().livein_end(); I != E; ++I) {
819 unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(I->first);
820 if (VRRegNo[RegNo] == I->first) // If this really is a vector reg.
821 UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
823 for (MachineRegisterInfo::liveout_iterator
824 I = MF->getRegInfo().liveout_begin(),
825 E = MF->getRegInfo().liveout_end(); I != E; ++I) {
826 unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(*I);
827 if (VRRegNo[RegNo] == *I) // If this really is a vector reg.
828 UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
831 // If no registers are used, turn this into a copy.
832 if (UsedRegMask == 0) {
833 // Remove all VRSAVE code.
834 RemoveVRSaveCode(MI);
// Emit ORI/ORIS (or both) depending on which halfword(s) of the mask are set.
838 unsigned SrcReg = MI->getOperand(1).getReg();
839 unsigned DstReg = MI->getOperand(0).getReg();
// Mask fits in the low 16 bits: a single ORI suffices.
841 if ((UsedRegMask & 0xFFFF) == UsedRegMask) {
842 if (DstReg != SrcReg)
843 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
845 .addImm(UsedRegMask);
847 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
848 .addReg(SrcReg, false, false, true)
849 .addImm(UsedRegMask);
// Mask fits in the high 16 bits: a single ORIS suffices.
850 } else if ((UsedRegMask & 0xFFFF0000) == UsedRegMask) {
851 if (DstReg != SrcReg)
852 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
854 .addImm(UsedRegMask >> 16);
856 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
857 .addReg(SrcReg, false, false, true)
858 .addImm(UsedRegMask >> 16);
// Bits set in both halves: ORIS the high half, then ORI the low half.
860 if (DstReg != SrcReg)
861 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
863 .addImm(UsedRegMask >> 16);
865 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORIS), DstReg)
866 .addReg(SrcReg, false, false, true)
867 .addImm(UsedRegMask >> 16);
869 BuildMI(*MI->getParent(), MI, TII.get(PPC::ORI), DstReg)
870 .addReg(DstReg, false, false, true)
871 .addImm(UsedRegMask & 0xFFFF);
874 // Remove the old UPDATE_VRSAVE instruction.
875 MI->eraseFromParent();
878 /// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size, then record the final aligned stack size in MachineFrameInfo.
880 void PPCRegisterInfo::determineFrameLayout(MachineFunction &MF) const {
881 MachineFrameInfo *MFI = MF.getFrameInfo();
883 // Get the number of bytes to allocate from the FrameInfo
884 unsigned FrameSize = MFI->getStackSize();
886 // Get the alignments provided by the target, and the maximum alignment
887 // (if any) of the fixed frame objects.
888 unsigned MaxAlign = MFI->getMaxAlignment();
889 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
// AlignMask relies on TargetAlign being a power of two.
890 unsigned AlignMask = TargetAlign - 1; //
892 // If we are a leaf function, and use up to 224 bytes of stack space,
893 // don't have a frame pointer, calls, or dynamic alloca then we do not need
894 // to adjust the stack pointer (we fit in the Red Zone).
895 if (FrameSize <= 224 && // Fits in red zone.
896 !MFI->hasVarSizedObjects() && // No dynamic alloca.
897 !MFI->hasCalls() && // No calls.
898 (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
// Zero stack size means the prologue/epilogue will emit no SP adjustment.
900 MFI->setStackSize(0);
904 // Get the maximum call frame size of all the calls.
905 unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
907 // Maximum call frame needs to be at least big enough for linkage and 8 args.
908 unsigned minCallFrameSize =
909 PPCFrameInfo::getMinCallFrameSize(Subtarget.isPPC64(),
910 Subtarget.isMachoABI());
911 maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
913 // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
914 // that allocations will be aligned.
915 if (MFI->hasVarSizedObjects())
916 maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
918 // Update maximum call frame size.
919 MFI->setMaxCallFrameSize(maxCallFrameSize);
921 // Include call frame size in total.
922 FrameSize += maxCallFrameSize;
924 // Make sure the frame is aligned.
925 FrameSize = (FrameSize + AlignMask) & ~AlignMask;
927 // Update frame info.
928 MFI->setStackSize(FrameSize);
// processFunctionBeforeCalleeSavedScan - Runs before the callee-saved-register
// scan: records/clears LR usage, allocates the frame-pointer save slot and
// tail-call linkage area, and reserves a scavenging slot when needed.
932 PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
933 RegScavenger *RS) const {
934 // Save and clear the LR state.
// Record whether LR was used in PPCFunctionInfo, then mark it unused so the
// generic spiller does not also try to save it.
935 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
936 unsigned LR = getRARegister();
937 FI->setUsesLR(MF.getRegInfo().isPhysRegUsed(LR));
938 MF.getRegInfo().setPhysRegUnused(LR);
940 // Save R31 if necessary
941 int FPSI = FI->getFramePointerSaveIndex();
942 bool IsPPC64 = Subtarget.isPPC64();
943 bool IsELF32_ABI = Subtarget.isELF32_ABI();
944 bool IsMachoABI = Subtarget.isMachoABI();
945 MachineFrameInfo *MFI = MF.getFrameInfo();
947 // If the frame pointer save index hasn't been defined yet.
948 if (!FPSI && (NoFramePointerElim || MFI->hasVarSizedObjects()) &&
950 // Find out what the fix offset of the frame pointer save area.
951 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64,
953 // Allocate the frame index for frame pointer save area.
// Fixed object at the ABI-defined offset; 8 bytes on PPC64, 4 on PPC32.
954 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
956 FI->setFramePointerSaveIndex(FPSI);
959 // Reserve stack space to move the linkage area to in case of a tail call.
// NOTE(review): TCSPDelta is assigned here but its declaration is not
// visible in this excerpt -- confirm it is declared as an int above.
961 if (PerformTailCallOpt && (TCSPDelta=FI->getTailCallSPDelta()) < 0) {
962 int AddFPOffsetAmount = IsELF32_ABI ? -4 : 0;
963 MF.getFrameInfo()->CreateFixedObject( -1 * TCSPDelta,
964 AddFPOffsetAmount + TCSPDelta);
966 // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
967 // a large stack, which will require scavenging a register to materialize a
969 // FIXME: this doesn't actually check stack size, so is a bit pessimistic
970 // FIXME: doesn't detect whether or not we need to spill vXX, which requires
973 if (EnableRegisterScavenging) // FIXME (64-bit): Enable.
974 if (needsFP(MF) || spillsCR(MF)) {
// Give the register scavenger an emergency spill slot sized/aligned for a
// GPR (64-bit G8RC on PPC64, 32-bit GPRC otherwise).
975 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
976 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
977 const TargetRegisterClass *RC = IsPPC64 ? G8RC : GPRC;
978 RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
979 RC->getAlignment()));
// emitPrologue - Insert prologue code into the function's entry block:
// rewrite UPDATE_VRSAVE, save LR, allocate the stack frame (with optional
// over-alignment), emit DWARF frame-move records, and establish the frame
// pointer in R31/X31 when one is needed.
984 PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
985 MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
986 MachineBasicBlock::iterator MBBI = MBB.begin();
987 MachineFrameInfo *MFI = MF.getFrameInfo();
988 MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
// Frame-move (CFI) records are needed for debug info, any function that can
// throw, or when unwind tables are mandatory.
989 bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
990 !MF.getFunction()->doesNotThrow() ||
991 UnwindTablesMandatory;
993 // Prepare for frame info.
994 unsigned FrameLabelId = 0;
996 // Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// replace the pseudo with real ORI/ORIS instructions (or delete it).
998 for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
999 if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
1000 HandleVRSaveUpdate(MBBI, TII);
1005 // Move MBBI back to the beginning of the function.
1008 // Work out frame sizes.
1009 determineFrameLayout(MF);
1010 unsigned FrameSize = MFI->getStackSize();
1012 int NegFrameSize = -FrameSize;
1014 // Get processor type.
1015 bool IsPPC64 = Subtarget.isPPC64();
1016 // Get operating system
1017 bool IsMachoABI = Subtarget.isMachoABI();
1018 // Check if the link register (LR) has been used.
1019 bool UsesLR = MustSaveLR(MF);
1020 // Do we have a frame pointer for this function?
1021 bool HasFP = hasFP(MF) && FrameSize;
// ABI-defined offsets (from SP) of the LR and FP save slots.
1023 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
1024 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
// 64-bit path: move LR into X0 and store it (and the old FP) with STD.
1028 BuildMI(MBB, MBBI, TII.get(PPC::MFLR8), PPC::X0);
1031 BuildMI(MBB, MBBI, TII.get(PPC::STD))
1037 BuildMI(MBB, MBBI, TII.get(PPC::STD))
1039 .addImm(LROffset / 4)
// 32-bit path: same, with MFLR/STW.
1043 BuildMI(MBB, MBBI, TII.get(PPC::MFLR), PPC::R0);
1046 BuildMI(MBB, MBBI, TII.get(PPC::STW))
1052 BuildMI(MBB, MBBI, TII.get(PPC::STW))
1058 // Skip if a leaf routine.
1059 if (!FrameSize) return;
1061 // Get stack alignments.
1062 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1063 unsigned MaxAlign = MFI->getMaxAlignment();
1065 if (needsFrameMoves) {
1066 // Mark effective beginning of when frame pointer becomes valid.
1067 FrameLabelId = MMI->NextLabelID();
1068 BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(FrameLabelId);
1071 // Adjust stack pointer: r1 += NegFrameSize.
1072 // If there is a preferred stack alignment, align R1 now
// 32-bit, over-aligned case: round R1 down to MaxAlign via RLWINM/SUBFIC,
// then allocate with a store-with-update so the back-chain is written.
1075 if (ALIGN_STACK && MaxAlign > TargetAlign) {
1076 assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
1077 assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
1079 BuildMI(MBB, MBBI, TII.get(PPC::RLWINM), PPC::R0)
1082 .addImm(32 - Log2_32(MaxAlign))
1084 BuildMI(MBB, MBBI, TII.get(PPC::SUBFIC) ,PPC::R0)
1085 .addReg(PPC::R0, false, false, true)
1086 .addImm(NegFrameSize);
1087 BuildMI(MBB, MBBI, TII.get(PPC::STWUX))
// Small frame: single STWU with the immediate displacement.
1091 } else if (isInt16(NegFrameSize)) {
1092 BuildMI(MBB, MBBI, TII.get(PPC::STWU), PPC::R1)
1094 .addImm(NegFrameSize)
// Large frame: materialize -FrameSize in R0 with LIS/ORI, then STWUX.
1097 BuildMI(MBB, MBBI, TII.get(PPC::LIS), PPC::R0)
1098 .addImm(NegFrameSize >> 16);
1099 BuildMI(MBB, MBBI, TII.get(PPC::ORI), PPC::R0)
1100 .addReg(PPC::R0, false, false, true)
1101 .addImm(NegFrameSize & 0xFFFF);
1102 BuildMI(MBB, MBBI, TII.get(PPC::STWUX))
// 64-bit equivalents of the three cases above (RLDICL/SUBFIC8, STDU, STDUX).
1108 if (ALIGN_STACK && MaxAlign > TargetAlign) {
1109 assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
1110 assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
1112 BuildMI(MBB, MBBI, TII.get(PPC::RLDICL), PPC::X0)
1115 .addImm(64 - Log2_32(MaxAlign));
1116 BuildMI(MBB, MBBI, TII.get(PPC::SUBFIC8), PPC::X0)
1118 .addImm(NegFrameSize);
1119 BuildMI(MBB, MBBI, TII.get(PPC::STDUX))
1123 } else if (isInt16(NegFrameSize)) {
// STDU scales its displacement by 4, hence the division.
1124 BuildMI(MBB, MBBI, TII.get(PPC::STDU), PPC::X1)
1126 .addImm(NegFrameSize / 4)
1129 BuildMI(MBB, MBBI, TII.get(PPC::LIS8), PPC::X0)
1130 .addImm(NegFrameSize >> 16);
1131 BuildMI(MBB, MBBI, TII.get(PPC::ORI8), PPC::X0)
1132 .addReg(PPC::X0, false, false, true)
1133 .addImm(NegFrameSize & 0xFFFF);
1134 BuildMI(MBB, MBBI, TII.get(PPC::STDUX))
// Emit DWARF machine-move records describing the SP adjustment, callee-saved
// register spills, the LR save slot, and the frame-pointer definition.
1141 if (needsFrameMoves) {
1142 std::vector<MachineMove> &Moves = MMI->getFrameMoves();
1145 // Show update of SP.
1146 MachineLocation SPDst(MachineLocation::VirtualFP);
1147 MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
1148 Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
1150 MachineLocation SP(IsPPC64 ? PPC::X31 : PPC::R31);
1151 Moves.push_back(MachineMove(FrameLabelId, SP, SP));
1155 MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
1156 MachineLocation FPSrc(IsPPC64 ? PPC::X31 : PPC::R31);
1157 Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
1160 // Add callee saved registers to move list.
1161 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
1162 for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
1163 int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
1164 unsigned Reg = CSI[I].getReg();
// LR is recorded separately below, so skip it here.
1165 if (Reg == PPC::LR || Reg == PPC::LR8) continue;
1166 MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
1167 MachineLocation CSSrc(Reg);
1168 Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
1171 MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
1172 MachineLocation LRSrc(IsPPC64 ? PPC::LR8 : PPC::LR);
1173 Moves.push_back(MachineMove(FrameLabelId, LRDst, LRSrc));
1175 // Mark effective beginning of when frame pointer is ready.
1176 unsigned ReadyLabelId = MMI->NextLabelID();
1177 BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(ReadyLabelId);
1179 MachineLocation FPDst(HasFP ? (IsPPC64 ? PPC::X31 : PPC::R31) :
1180 (IsPPC64 ? PPC::X1 : PPC::R1));
1181 MachineLocation FPSrc(MachineLocation::VirtualFP);
1182 Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
1185 // If there is a frame pointer, copy R1 into R31
// OR rd,rs,rs is the canonical PPC register copy.
1188 BuildMI(MBB, MBBI, TII.get(PPC::OR), PPC::R31)
1192 BuildMI(MBB, MBBI, TII.get(PPC::OR8), PPC::X31)
// emitEpilogue - Insert epilogue code before the terminating return (BLR or
// one of the TCRETURN pseudos): restore the stack pointer, reload LR and the
// frame pointer, pop the callee-pop area for fastcc, and lower TCRETURN
// pseudos to the corresponding tail-branch instructions.
1199 void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
1200 MachineBasicBlock &MBB) const {
1201 MachineBasicBlock::iterator MBBI = prior(MBB.end());
1202 unsigned RetOpcode = MBBI->getOpcode();
1204 assert( (RetOpcode == PPC::BLR ||
1205 RetOpcode == PPC::TCRETURNri ||
1206 RetOpcode == PPC::TCRETURNdi ||
1207 RetOpcode == PPC::TCRETURNai ||
1208 RetOpcode == PPC::TCRETURNri8 ||
1209 RetOpcode == PPC::TCRETURNdi8 ||
1210 RetOpcode == PPC::TCRETURNai8) &&
1211 "Can only insert epilog into returning blocks");
1213 // Get alignment info so we know how to restore r1
1214 const MachineFrameInfo *MFI = MF.getFrameInfo();
1215 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
1216 unsigned MaxAlign = MFI->getMaxAlignment();
1218 // Get the number of bytes allocated from the FrameInfo.
1219 int FrameSize = MFI->getStackSize();
1221 // Get processor type.
1222 bool IsPPC64 = Subtarget.isPPC64();
1223 // Get operating system
1224 bool IsMachoABI = Subtarget.isMachoABI();
1225 // Check if the link register (LR) has been used.
1226 bool UsesLR = MustSaveLR(MF);
1227 // Do we have a frame pointer for this function?
1228 bool HasFP = hasFP(MF) && FrameSize;
// ABI-defined save-slot offsets, matching those used in emitPrologue.
1230 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
1231 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
1233 bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
1234 RetOpcode == PPC::TCRETURNdi ||
1235 RetOpcode == PPC::TCRETURNai ||
1236 RetOpcode == PPC::TCRETURNri8 ||
1237 RetOpcode == PPC::TCRETURNdi8 ||
1238 RetOpcode == PPC::TCRETURNai8;
1240 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
// For tail-call returns, fold the call's stack adjustment (operand 1 of the
// TCRETURN pseudo) into the frame size being deallocated.
1243 int MaxTCRetDelta = FI->getTailCallSPDelta();
1244 MachineOperand &StackAdjust = MBBI->getOperand(1);
1245 assert( StackAdjust.isImmediate() && "Expecting immediate value.");
1246 // Adjust stack pointer.
1247 int StackAdj = StackAdjust.getImm();
1248 int Delta = StackAdj - MaxTCRetDelta;
1249 assert((Delta >= 0) && "Delta must be positive");
1250 if (MaxTCRetDelta>0)
1251 FrameSize += (StackAdj +Delta);
1253 FrameSize += StackAdj;
1257 // The loaded (or persistent) stack pointer value is offset by the 'stwu'
1258 // on entry to the function. Add this offset back now.
1260 // If this function contained a fastcc call and PerformTailCallOpt is
1261 // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
1262 // call which invalidates the stack pointer value in SP(0). So we use the
1263 // value of R31 in this case.
// 32-bit restore of R1: from R31 (fastcc case), by ADDI (small static
// frame), or by reloading the back-chain from SP(0).
1264 if (FI->hasFastCall() && isInt16(FrameSize)) {
1265 assert(hasFP(MF) && "Expecting a valid the frame pointer.");
1266 BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
1267 .addReg(PPC::R31).addImm(FrameSize);
1268 } else if(FI->hasFastCall()) {
1269 BuildMI(MBB, MBBI, TII.get(PPC::LIS), PPC::R0)
1270 .addImm(FrameSize >> 16);
1271 BuildMI(MBB, MBBI, TII.get(PPC::ORI), PPC::R0)
1272 .addReg(PPC::R0, false, false, true)
1273 .addImm(FrameSize & 0xFFFF);
1274 BuildMI(MBB, MBBI, TII.get(PPC::ADD4))
1278 } else if (isInt16(FrameSize) &&
1279 (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
1280 !MFI->hasVarSizedObjects()) {
1281 BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
1282 .addReg(PPC::R1).addImm(FrameSize);
1284 BuildMI(MBB, MBBI, TII.get(PPC::LWZ),PPC::R1).addImm(0).addReg(PPC::R1);
// 64-bit restore of X1: same three strategies with 8-byte instructions.
1287 if (FI->hasFastCall() && isInt16(FrameSize)) {
1288 assert(hasFP(MF) && "Expecting a valid the frame pointer.");
1289 BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
1290 .addReg(PPC::X31).addImm(FrameSize);
1291 } else if(FI->hasFastCall()) {
1292 BuildMI(MBB, MBBI, TII.get(PPC::LIS8), PPC::X0)
1293 .addImm(FrameSize >> 16);
1294 BuildMI(MBB, MBBI, TII.get(PPC::ORI8), PPC::X0)
1295 .addReg(PPC::X0, false, false, true)
1296 .addImm(FrameSize & 0xFFFF);
1297 BuildMI(MBB, MBBI, TII.get(PPC::ADD8))
1301 } else if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
1302 !MFI->hasVarSizedObjects()) {
1303 BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
1304 .addReg(PPC::X1).addImm(FrameSize);
1306 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X1).addImm(0).addReg(PPC::X1);
// Reload LR and the saved frame pointer from their slots, then move LR back
// into the link register (64-bit path; LD offsets are scaled by 4).
1313 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X0)
1314 .addImm(LROffset/4).addReg(PPC::X1);
1317 BuildMI(MBB, MBBI, TII.get(PPC::LD), PPC::X31)
1318 .addImm(FPOffset/4).addReg(PPC::X1);
1321 BuildMI(MBB, MBBI, TII.get(PPC::MTLR8)).addReg(PPC::X0);
// 32-bit path: LWZ reloads and MTLR.
1324 BuildMI(MBB, MBBI, TII.get(PPC::LWZ), PPC::R0)
1325 .addImm(LROffset).addReg(PPC::R1);
1328 BuildMI(MBB, MBBI, TII.get(PPC::LWZ), PPC::R31)
1329 .addImm(FPOffset).addReg(PPC::R1);
1332 BuildMI(MBB, MBBI, TII.get(PPC::MTLR)).addReg(PPC::R0);
1335 // Callee pop calling convention. Pop parameter/linkage area. Used for tail
1336 // call optimization
1337 if (PerformTailCallOpt && RetOpcode == PPC::BLR &&
1338 MF.getFunction()->getCallingConv() == CallingConv::Fast) {
1339 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1340 unsigned CallerAllocatedAmt = FI->getMinReservedArea();
1341 unsigned StackReg = IsPPC64 ? PPC::X1 : PPC::R1;
1342 unsigned FPReg = IsPPC64 ? PPC::X31 : PPC::R31;
1343 unsigned TmpReg = IsPPC64 ? PPC::X0 : PPC::R0;
1344 unsigned ADDIInstr = IsPPC64 ? PPC::ADDI8 : PPC::ADDI;
1345 unsigned ADDInstr = IsPPC64 ? PPC::ADD8 : PPC::ADD4;
1346 unsigned LISInstr = IsPPC64 ? PPC::LIS8 : PPC::LIS;
1347 unsigned ORIInstr = IsPPC64 ? PPC::ORI8 : PPC::ORI;
// Pop the caller-allocated area: ADDI for 16-bit amounts, LIS/ORI/ADD
// otherwise.
1349 if (CallerAllocatedAmt && isInt16(CallerAllocatedAmt)) {
1350 BuildMI(MBB, MBBI, TII.get(ADDIInstr), StackReg)
1351 .addReg(StackReg).addImm(CallerAllocatedAmt);
1353 BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
1354 .addImm(CallerAllocatedAmt >> 16);
1355 BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
1356 .addReg(TmpReg, false, false, true)
1357 .addImm(CallerAllocatedAmt & 0xFFFF);
1358 BuildMI(MBB, MBBI, TII.get(ADDInstr))
// Lower each TCRETURN pseudo to its matching tail-branch instruction.
1363 } else if (RetOpcode == PPC::TCRETURNdi) {
1364 MBBI = prior(MBB.end());
1365 MachineOperand &JumpTarget = MBBI->getOperand(0);
1366 BuildMI(MBB, MBBI, TII.get(PPC::TAILB)).
1367 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1368 } else if (RetOpcode == PPC::TCRETURNri) {
1369 MBBI = prior(MBB.end());
1370 MachineOperand &JumpTarget = MBBI->getOperand(0);
1371 assert(JumpTarget.isRegister() && "Expecting register operand.");
1372 BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR));
1373 } else if (RetOpcode == PPC::TCRETURNai) {
1374 MBBI = prior(MBB.end());
1375 MachineOperand &JumpTarget = MBBI->getOperand(0);
1376 BuildMI(MBB, MBBI, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
1377 } else if (RetOpcode == PPC::TCRETURNdi8) {
1378 MBBI = prior(MBB.end());
1379 MachineOperand &JumpTarget = MBBI->getOperand(0);
1380 BuildMI(MBB, MBBI, TII.get(PPC::TAILB8)).
1381 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1382 } else if (RetOpcode == PPC::TCRETURNri8) {
1383 MBBI = prior(MBB.end());
1384 MachineOperand &JumpTarget = MBBI->getOperand(0);
1385 assert(JumpTarget.isRegister() && "Expecting register operand.");
1386 BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR8));
1387 } else if (RetOpcode == PPC::TCRETURNai8) {
1388 MBBI = prior(MBB.end());
1389 MachineOperand &JumpTarget = MBBI->getOperand(0);
1390 BuildMI(MBB, MBBI, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
1394 unsigned PPCRegisterInfo::getRARegister() const {
1395 return !Subtarget.isPPC64() ? PPC::LR : PPC::LR8;
1398 unsigned PPCRegisterInfo::getFrameRegister(MachineFunction &MF) const {
1399 if (!Subtarget.isPPC64())
1400 return hasFP(MF) ? PPC::R31 : PPC::R1;
1402 return hasFP(MF) ? PPC::X31 : PPC::X1;
1405 void PPCRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
1407 // Initial state of the frame pointer is R1.
1408 MachineLocation Dst(MachineLocation::VirtualFP);
1409 MachineLocation Src(PPC::R1, 0);
1410 Moves.push_back(MachineMove(0, Dst, Src));
1413 unsigned PPCRegisterInfo::getEHExceptionRegister() const {
1414 return !Subtarget.isPPC64() ? PPC::R3 : PPC::X3;
1417 unsigned PPCRegisterInfo::getEHHandlerRegister() const {
1418 return !Subtarget.isPPC64() ? PPC::R4 : PPC::X4;
1421 int PPCRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
1422 // FIXME: Most probably dwarf numbers differs for Linux and Darwin
1423 return PPCGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
1426 #include "PPCGenRegisterInfo.inc"