//===- X86RegisterInfo.cpp - X86 Register Information ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}
/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now, just a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0:  case X86::XMM8:  case X86::MM0:
    return 0;
  case X86::XMM1:  case X86::XMM9:  case X86::MM1:
    return 1;
  case X86::XMM2:  case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3:  case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4:  case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5:  case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6:  case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7:  case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

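/// getMatchingSuperRegClass - Return a register class that contains the
/// registers of class A which have a sub-register in class B at the given
/// sub-register index, or null if no such class exists.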
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR32RegClass) {
      return A;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::FR64RegClass) {
      return A;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::VR128RegClass) {
      return A;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  }
  return 0;
}

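/// getPointerRegClass - Return the register class used for addressing: the
/// normal GPRs (Kind 0), or the GPRs excluding the stack pointer when it
/// cannot be encoded (Kind 1).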
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  }
}

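/// getCrossCopyRegClass - Copies of the EFLAGS register class (CCR) across
/// basic blocks are done through a general-purpose register instead.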
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

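/// getCalleeSavedRegs - Return a null-terminated list of the callee-saved
/// registers for the current subtarget and calling convention.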
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

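/// getCalleeSavedRegClasses - Return the register classes corresponding,
/// entry by entry, to the list returned by getCalleeSavedRegs.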
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF)
    callsEHReturn = MF->getMMI().callsEHReturn();

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };

  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };

  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}

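/// getReservedRegs - Mark the registers the allocator must never use: the
/// stack pointer, the instruction pointer, the frame pointer when it is in
/// use, and the x87 stack.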
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (DisableFramePointerElim(MF) ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

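/// canRealignStack - Return true if realigning the stack is legal for this
/// function: realignment is enabled and the frame has no variable-sized
/// objects.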
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

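/// needsStackRealignment - Return true if the function requires more stack
/// alignment than the target provides by default, either because of an
/// over-aligned frame object or an explicit stack alignment attribute.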
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment =
    RealignStack && ((MFI->getMaxAlignment() > StackAlign) ||
                     F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: Temporarily disable the error - it seems to be too conservative.
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  return (requiresRealignment && !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

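/// getFrameIndexOffset - Return the offset of the given frame index from the
/// base register used to address it (the stack pointer, or the frame pointer
/// when one is in use or the stack is realigned).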
int
X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += SlotSize;
    } else {
      unsigned Align = MFI->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      Align = 0; // Silence the unused-variable warning in release builds.
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

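/// eliminateCallFramePseudoInstr - Replace the ADJCALLSTACKDOWN/ADJCALLSTACKUP
/// pseudo instructions with explicit stack-pointer arithmetic where needed,
/// then erase the pseudo instruction.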
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr)
          .addReg(StackPtr)
          .addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());

        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;

        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr)
            .addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction.
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr)
          .addReg(StackPtr)
          .addImm(CalleeAmt);

      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

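/// eliminateFrameIndex - Rewrite a frame-index operand into a base register
/// (ESP/RSP or EBP/RBP) plus a byte offset.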
unsigned
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, FrameIndexValue *Value,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
  } else
    FIOffset = getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
  return 0;
}

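/// processFunctionBeforeCalleeSavedScan - Allocate the fixed frame objects for
/// the tail-call return-address area and, when a frame pointer is used, the
/// slot where EBP/RBP will be saved.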
void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta,
                           true, false);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true, false);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    FrameIdx = 0;
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}

/// mergeSPUpdatesUp - If the instruction before MBBI is a stack-pointer
/// adjustment, fold it into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - If the instruction after MBBI is a stack-pointer
/// adjustment, fold it into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

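/// emitCalleeSavedFrameMoves - Record DWARF machine moves describing where
/// each callee-saved register is spilled relative to the CFA, anchored at
/// Label.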
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                MCSymbol *Label,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now.
  // It should be rewritten from scratch and generalized at some point.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // used.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //   pushl  %ebp
    //   movl   %esp, %ebp
    //   pushl  %ebp
    //   pushl  %esi
    //    ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    !Fn->doesNotThrow() || UnwindTablesMandatory;
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();   // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&     // No dynamic alloca.
      !MFI->hasCalls() &&               // No calls.
      !Subtarget->isTargetWin64()) {    // Win64 has no Red Zone
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  } else if (Subtarget->isTargetWin64()) {
    // We need to always allocate 32 bytes as register spill area.
    // FIXME: We might reuse these 32 bytes for leaf functions.
    StackSize += 32;
    MFI->setStackSize(StackSize);
  }
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }
  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ?
        MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);
  // Adjust stack pointer: ESP -= numbytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // Function prologue calls _alloca to probe the stack when allocating more
    // than 4k bytes in one go. Touching the stack at 4K increments is necessary
    // to ensure that the guard pages used by the OS virtual memory manager are
    // allocated in correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca")
        .addReg(StackPtr, RegState::Define | RegState::Implicit);

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is a SUB32ri of ESP immediately before this instruction, merge
    // the two. This can be the case when tail call elimination is enabled and
    // the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }
  if ((NumBytes || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }
}

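/// emitEpilogue - Restore the stack pointer, pop the frame pointer if it was
/// saved, and perform the stack adjustments required by eh_return and
/// tail-call return pseudo instructions.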
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break; // These are ok
  }
  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();
  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame back.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                        FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }
  // We're returning from the function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }
    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                     ? X86::TAILJMPd : X86::TAILJMPd64)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                         JumpTarget.getTargetFlags());
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

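/// getRARegister - Return the register that holds the return address: the
/// instruction pointer.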
unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP  // Should have dwarf #16.
                 : X86::EIP; // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void
X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4 (rsp+8 on x86-64).
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
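/// getX86SubSuperRegister - X86 utility function. It returns the sub or super
/// register of a specific X86 register.
/// e.g. getX86SubSuperRegister(X86::EAX, EVT::i16) returns X86::AX.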
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

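// The pass below conservatively reserves the frame pointer whenever a virtual
// register's class requires more alignment than the stack guarantees, so that
// dynamic stack realignment remains possible.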
namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }