1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the X86 implementation of the TargetFrameLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "X86FrameLowering.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/Analysis/LibCallSemantics.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/WinEHFuncInfo.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCSymbol.h"
32 #include "llvm/Target/TargetOptions.h"
33 #include "llvm/Support/Debug.h"
38 X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
39 unsigned StackAlignOverride)
40 : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
41 STI.is64Bit() ? -8 : -4),
42 STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
43 // Cache a bunch of frame-related predicates for this subtarget.
44 SlotSize = TRI->getSlotSize();
45 Is64Bit = STI.is64Bit();
46 IsLP64 = STI.isTarget64BitLP64();
47 // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit ones.
48 Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
49 StackPtr = TRI->getStackRegister();
52 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
53 return !MF.getFrameInfo()->hasVarSizedObjects() &&
54 !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
57 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the
58 /// call frame pseudos can be simplified. Having a FP, as in the default
59 /// implementation, is not sufficient here since we can't always use it.
60 /// Use a more nuanced condition.
62 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
63 return hasReservedCallFrame(MF) ||
64 (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
65 TRI->hasBasePointer(MF);
68 // needsFrameIndexResolution - Do we need to perform FI resolution for
69 // this function. Normally, this is required only when the function
70 // has any stack objects. However, FI resolution actually has another job,
71 // not apparent from the title - it resolves callframesetup/destroy
72 // that were not simplified earlier.
73 // So, this is required for x86 functions that have push sequences even
74 // when there are no stack objects.
76 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
77 return MF.getFrameInfo()->hasStackObjects() ||
78 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
81 /// hasFP - Return true if the specified function should have a dedicated frame
82 /// pointer register. This is true if the function has variable sized allocas
83 /// or if frame pointer elimination is disabled.
84 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
85 const MachineFrameInfo *MFI = MF.getFrameInfo();
86 const MachineModuleInfo &MMI = MF.getMMI();
88 return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
89 TRI->needsStackRealignment(MF) ||
90 MFI->hasVarSizedObjects() ||
91 MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
92 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
93 MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
94 MFI->hasStackMap() || MFI->hasPatchPoint());
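// The helpers below pick the concrete ADD/SUB/AND/LEA opcode for
// stack-pointer arithmetic: the 64-bit forms when the target uses a 64-bit
// stack pointer, and the ri8 forms when the immediate fits in a signed
// 8-bit field.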
97 static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
100 return X86::SUB64ri8;
101 return X86::SUB64ri32;
104 return X86::SUB32ri8;
109 static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
112 return X86::ADD64ri8;
113 return X86::ADD64ri32;
116 return X86::ADD32ri8;
121 static unsigned getSUBrrOpcode(unsigned isLP64) {
122 return isLP64 ? X86::SUB64rr : X86::SUB32rr;
125 static unsigned getADDrrOpcode(unsigned isLP64) {
126 return isLP64 ? X86::ADD64rr : X86::ADD32rr;
129 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
132 return X86::AND64ri8;
133 return X86::AND64ri32;
136 return X86::AND32ri8;
140 static unsigned getLEArOpcode(unsigned IsLP64) {
141 return IsLP64 ? X86::LEA64r : X86::LEA32r;
144 /// findDeadCallerSavedReg - Return a caller-saved register that isn't live
145 /// when it reaches the "return" instruction. We can then pop a stack object
146 /// to this register without worrying about clobbering it.
147 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
148 MachineBasicBlock::iterator &MBBI,
149 const TargetRegisterInfo *TRI,
151 const MachineFunction *MF = MBB.getParent();
152 const Function *F = MF->getFunction();
153 if (!F || MF->getMMI().callsEHReturn())
156 static const uint16_t CallerSavedRegs32Bit[] = {
157 X86::EAX, X86::EDX, X86::ECX, 0
160 static const uint16_t CallerSavedRegs64Bit[] = {
161 X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
162 X86::R8, X86::R9, X86::R10, X86::R11, 0
165 unsigned Opc = MBBI->getOpcode();
172 case X86::TCRETURNdi:
173 case X86::TCRETURNri:
174 case X86::TCRETURNmi:
175 case X86::TCRETURNdi64:
176 case X86::TCRETURNri64:
177 case X86::TCRETURNmi64:
179 case X86::EH_RETURN64: {
180 SmallSet<uint16_t, 8> Uses;
181 for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
182 MachineOperand &MO = MBBI->getOperand(i);
183 if (!MO.isReg() || MO.isDef())
185 unsigned Reg = MO.getReg();
188 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
192 const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
194 if (!Uses.count(*CS))
202 static bool isEAXLiveIn(MachineFunction &MF) {
203 for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
204 EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
205 unsigned Reg = II->first;
207 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
208 Reg == X86::AH || Reg == X86::AL)
215 /// Check whether or not the terminators of \p MBB need to read EFLAGS.
216 static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
217 for (const MachineInstr &MI : MBB.terminators()) {
218 bool BreakNext = false;
219 for (const MachineOperand &MO : MI.operands()) {
222 unsigned Reg = MO.getReg();
223 if (Reg != X86::EFLAGS)
226 // This terminator reads EFLAGS that are not defined
227 // by a previous terminator.
238 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
239 /// stack pointer by a constant value.
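/// For example, a prologue request of NumBytes = -32 typically becomes a
/// single "sub $32, %rsp" (or "sub $32, %esp" on 32-bit targets); an
/// adjustment of exactly one slot may instead be emitted as a push or pop of
/// a dead caller-saved register, as handled below.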
240 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
241 MachineBasicBlock::iterator &MBBI,
242 int64_t NumBytes, bool InEpilogue) const {
243 bool isSub = NumBytes < 0;
244 uint64_t Offset = isSub ? -NumBytes : NumBytes;
246 uint64_t Chunk = (1LL << 31) - 1;
247 DebugLoc DL = MBB.findDebugLoc(MBBI);
250 if (Offset > Chunk) {
251 // Rather than emit a long series of instructions for large offsets,
252 // load the offset into a register and do one sub/add
255 if (isSub && !isEAXLiveIn(*MBB.getParent()))
256 Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
258 Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
261 unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
262 BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
265 ? getSUBrrOpcode(Is64Bit)
266 : getADDrrOpcode(Is64Bit);
267 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
270 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
276 uint64_t ThisVal = std::min(Offset, Chunk);
277 if (ThisVal == (Is64Bit ? 8 : 4)) {
278 // Use push / pop instead.
280 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
281 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
284 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
285 : (Is64Bit ? X86::POP64r : X86::POP32r);
286 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
287 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
289 MI->setFlag(MachineInstr::FrameSetup);
291 MI->setFlag(MachineInstr::FrameDestroy);
297 MachineInstrBuilder MI = BuildStackAdjustment(
298 MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
300 MI.setMIFlag(MachineInstr::FrameSetup);
302 MI.setMIFlag(MachineInstr::FrameDestroy);
308 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
309 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
310 int64_t Offset, bool InEpilogue) const {
311 assert(Offset != 0 && "zero offset stack adjustment requested");
313 // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
317 UseLEA = STI.useLeaForSP();
319 // If we can use LEA for SP but we shouldn't, check that none
320 // of the terminators uses EFLAGS. Otherwise we will insert
321 // an ADD that will redefine EFLAGS and break the condition.
322 // Alternatively, we could move the ADD, but this may not be possible
323 // and is an optimization anyway.
324 UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
325 if (UseLEA && !STI.useLeaForSP())
326 UseLEA = terminatorsNeedFlagsAsInput(MBB);
327 // If that assert fires, it means we did not do the right thing
328 // in canUseAsEpilogue.
329 assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
330 "We shouldn't have allowed this insertion point");
333 MachineInstrBuilder MI;
335 MI = addRegOffset(BuildMI(MBB, MBBI, DL,
336 TII.get(getLEArOpcode(Uses64BitFramePtr)),
338 StackPtr, false, Offset);
340 bool IsSub = Offset < 0;
341 uint64_t AbsOffset = IsSub ? -Offset : Offset;
342 unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
343 : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
344 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
347 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
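// mergeSPUpdates - Look at the instruction before (or after) MBBI. If it is
// an ADD/SUB/LEA of an immediate into the stack pointer, fold it away and
// return the adjustment it performed (positive for ADD/LEA, negative for
// SUB) so the caller can merge it into its own stack-pointer update.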
352 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
353 MachineBasicBlock::iterator &MBBI,
354 bool doMergeWithPrevious) const {
355 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
356 (!doMergeWithPrevious && MBBI == MBB.end()))
359 MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
360 MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
362 unsigned Opc = PI->getOpcode();
365 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
366 Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
367 Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
368 PI->getOperand(0).getReg() == StackPtr) {
369 Offset += PI->getOperand(2).getImm();
371 if (!doMergeWithPrevious) MBBI = NI;
372 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
373 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
374 PI->getOperand(0).getReg() == StackPtr) {
375 Offset -= PI->getOperand(2).getImm();
377 if (!doMergeWithPrevious) MBBI = NI;
383 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
384 MachineBasicBlock::iterator MBBI, DebugLoc DL,
385 MCCFIInstruction CFIInst) const {
386 MachineFunction &MF = *MBB.getParent();
387 unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
388 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
389 .addCFIIndex(CFIIndex);
393 X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
394 MachineBasicBlock::iterator MBBI,
396 MachineFunction &MF = *MBB.getParent();
397 MachineFrameInfo *MFI = MF.getFrameInfo();
398 MachineModuleInfo &MMI = MF.getMMI();
399 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
401 // Add callee saved registers to move list.
402 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
403 if (CSI.empty()) return;
405 // Calculate offsets.
406 for (std::vector<CalleeSavedInfo>::const_iterator
407 I = CSI.begin(), E = CSI.end(); I != E; ++I) {
408 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
409 unsigned Reg = I->getReg();
411 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
412 BuildCFI(MBB, MBBI, DL,
413 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
417 /// usesTheStack - This function checks if any of the users of EFLAGS
418 /// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
419 /// to use the stack, and if we don't adjust the stack we clobber the first frame index.
421 /// See X86InstrInfo::copyPhysReg.
422 static bool usesTheStack(const MachineFunction &MF) {
423 const MachineRegisterInfo &MRI = MF.getRegInfo();
425 for (MachineRegisterInfo::reg_instr_iterator
426 ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
434 void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
435 MachineBasicBlock &MBB,
436 MachineBasicBlock::iterator MBBI,
438 bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
442 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
444 CallOp = X86::CALLpcrel32;
448 if (STI.isTargetCygMing()) {
449 Symbol = "___chkstk_ms";
453 } else if (STI.isTargetCygMing())
458 MachineInstrBuilder CI;
460 // All current stack probes take AX and SP as input, clobber flags, and
461 // preserve all registers. x86_64 probes leave RSP unmodified.
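// Under the large code model, for instance, the probe call is emitted
// roughly as
//   movabs $<probe symbol>, %r11
//   call *%r11
// while smaller code models use a direct pc-relative call.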
462 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
463 // For the large code model, we have to call through a register. Use R11,
464 // as it is scratch in all supported calling conventions.
465 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
466 .addExternalSymbol(Symbol);
467 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
469 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
472 unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
473 unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
474 CI.addReg(AX, RegState::Implicit)
475 .addReg(SP, RegState::Implicit)
476 .addReg(AX, RegState::Define | RegState::Implicit)
477 .addReg(SP, RegState::Define | RegState::Implicit)
478 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
481 // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
482 // themselves. They also do not clobber %rax, so we can reuse it when adjusting %rsp.
484 BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
490 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
491 // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
492 // and might require smaller successive adjustments.
493 const uint64_t Win64MaxSEHOffset = 128;
494 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
495 // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
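// For example, an SPAdjust of 40 clamps to 40 and rounds down to 32, while
// an SPAdjust of 500 clamps to 128, which is already 16-byte aligned.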
496 return SEHFrameOffset & -16;
499 // If we're forcing a stack realignment we can't rely on just the frame
500 // info; we need to know the ABI stack alignment as well in case we
501 // have a call out. Otherwise just make sure we have some alignment - we'll
502 // go with the minimum SlotSize.
503 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
504 const MachineFrameInfo *MFI = MF.getFrameInfo();
505 uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
506 unsigned StackAlign = getStackAlignment();
507 if (MF.getFunction()->hasFnAttribute("stackrealign")) {
509 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
510 else if (MaxAlign < SlotSize)
516 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
517 MachineBasicBlock::iterator MBBI,
519 uint64_t MaxAlign) const {
520 uint64_t Val = -MaxAlign;
522 BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
526 .setMIFlag(MachineInstr::FrameSetup);
528 // The EFLAGS implicit def is dead.
529 MI->getOperand(3).setIsDead();
532 /// emitPrologue - Push callee-saved registers onto the stack, which
533 /// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
534 /// space for local variables. Also emit labels used by the exception handler to
535 /// generate the exception handling frames.
538 Here's a gist of what gets emitted:
540 ; Establish frame pointer, if needed
543 .cfi_def_cfa_offset 16
544 .cfi_offset %rbp, -16
547 .cfi_def_cfa_register %rbp
549 ; Spill general-purpose registers
550 [for all callee-saved GPRs]
553 .cfi_def_cfa_offset (offset from RETADDR)
556 ; If the required stack alignment > default stack alignment
557 ; rsp needs to be re-aligned. This creates a "re-alignment gap"
558 ; of unknown size in the stack frame.
559 [if stack needs re-alignment]
562 ; Allocate space for locals
563 [if target is Windows and allocated space > 4096 bytes]
564 ; Windows needs special care for allocations larger
567 call ___chkstk_ms/___chkstk
573 .seh_stackalloc (size of XMM spill slots)
574 .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
579 ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
580 ; they may get spilled on any platform, if the current function
581 ; calls @llvm.eh.unwind.init
583 [for all callee-saved XMM registers]
584 movaps %<xmm reg>, -MMM(%rbp)
585 [for all callee-saved XMM registers]
586 .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
587 ; i.e. the offset relative to (%rbp - SEHFrameOffset)
589 [for all callee-saved XMM registers]
590 movaps %<xmm reg>, KKK(%rsp)
591 [for all callee-saved XMM registers]
592 .seh_savexmm %<xmm reg>, KKK
596 [if needs base pointer]
598 [if needs to restore base pointer]
603 [for all callee-saved registers]
604 .cfi_offset %<reg>, (offset from %rbp)
606 .cfi_def_cfa_offset (offset from RETADDR)
607 [for all callee-saved registers]
608 .cfi_offset %<reg>, (offset from %rsp)
611 - .seh directives are emitted only for Windows 64 ABI
612 - .cfi directives are emitted for all other ABIs
613 - for 32-bit code, substitute %e?? registers for %r??
616 void X86FrameLowering::emitPrologue(MachineFunction &MF,
617 MachineBasicBlock &MBB) const {
618 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
619 "MF used frame lowering for wrong subtarget");
620 MachineBasicBlock::iterator MBBI = MBB.begin();
621 MachineFrameInfo *MFI = MF.getFrameInfo();
622 const Function *Fn = MF.getFunction();
623 MachineModuleInfo &MMI = MF.getMMI();
624 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
625 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
626 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
627 bool IsFunclet = MBB.isEHFuncletEntry();
630 classifyEHPersonality(Fn->getPersonalityFn()) == EHPersonality::CoreCLR;
631 bool HasFP = hasFP(MF);
632 bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
633 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
634 bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
636 !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
637 unsigned FramePtr = TRI->getFrameRegister(MF);
638 const unsigned MachineFramePtr =
639 STI.isTarget64BitILP32()
640 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
642 unsigned BasePtr = TRI->getBaseRegister();
645 // Add RETADDR move area to callee saved frame size.
646 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
647 if (TailCallReturnAddrDelta && IsWin64Prologue)
648 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
650 if (TailCallReturnAddrDelta < 0)
651 X86FI->setCalleeSavedFrameSize(
652 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
654 bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
656 // The default stack probe size is 4096 if the function has no stackprobesize attribute.
658 unsigned StackProbeSize = 4096;
659 if (Fn->hasFnAttribute("stack-probe-size"))
660 Fn->getFnAttribute("stack-probe-size")
662 .getAsInteger(0, StackProbeSize);
664 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
665 // function, and use up to 128 bytes of stack space, don't have a frame
666 // pointer, calls, or dynamic alloca then we do not need to adjust the
667 // stack pointer (we fit in the Red Zone). We also check that we don't
668 // push and pop from the stack.
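// For example, a 64-bit leaf function with no callee-saved spills and a
// 96-byte frame has its StackSize clamped to 0 below, so no stack-pointer
// adjustment is emitted at all.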
669 if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
670 !TRI->needsStackRealignment(MF) &&
671 !MFI->hasVarSizedObjects() && // No dynamic alloca.
672 !MFI->adjustsStack() && // No calls.
673 !IsWin64CC && // Win64 has no Red Zone
674 !usesTheStack(MF) && // Don't push and pop.
675 !MF.shouldSplitStack()) { // Regular stack
676 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
677 if (HasFP) MinSize += SlotSize;
678 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
679 MFI->setStackSize(StackSize);
682 // Insert stack pointer adjustment for later moving of return addr. Only
683 // applies to tail call optimized functions where the callee argument stack
684 // size is bigger than the caller's.
685 if (TailCallReturnAddrDelta < 0) {
686 BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
687 /*InEpilogue=*/false)
688 .setMIFlag(MachineInstr::FrameSetup);
691 // Mapping for machine moves:
693 // DST: VirtualFP AND
694 // SRC: VirtualFP => DW_CFA_def_cfa_offset
695 // ELSE => DW_CFA_def_cfa
697 // SRC: VirtualFP AND
698 // DST: Register => DW_CFA_def_cfa_register
701 // OFFSET < 0 => DW_CFA_offset_extended_sf
702 // REG < 64 => DW_CFA_offset + Reg
703 // ELSE => DW_CFA_offset_extended
705 uint64_t NumBytes = 0;
706 int stackGrowth = -SlotSize;
708 // Find the funclet establisher parameter
709 unsigned Establisher = X86::NoRegister;
711 Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
713 Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
715 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
716 // Immediately spill establisher into the home slot.
717 // The runtime cares about this.
718 // MOV64mr %rdx, 16(%rsp)
719 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
720 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
722 .setMIFlag(MachineInstr::FrameSetup);
726 // Calculate required stack adjustment.
727 uint64_t FrameSize = StackSize - SlotSize;
728 // If required, include space for extra hidden slot for stashing base pointer.
729 if (X86FI->getRestoreBasePointer())
730 FrameSize += SlotSize;
732 NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
734 // Callee-saved registers are pushed on stack before the stack is realigned.
735 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
736 NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
738 // Get the offset of the stack slot for the EBP register, which is
739 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
740 // Update the frame offset adjustment.
742 MFI->setOffsetAdjustment(-NumBytes);
744 assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
745 "should calculate same local variable offset for funclets");
747 // Save EBP/RBP into the appropriate stack slot.
748 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
749 .addReg(MachineFramePtr, RegState::Kill)
750 .setMIFlag(MachineInstr::FrameSetup);
753 // Mark the place where EBP/RBP was saved.
754 // Define the current CFA rule to use the provided offset.
756 BuildCFI(MBB, MBBI, DL,
757 MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
759 // Change the rule for the FramePtr to be an "offset" rule.
760 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
761 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
762 nullptr, DwarfFramePtr, 2 * stackGrowth));
766 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
768 .setMIFlag(MachineInstr::FrameSetup);
771 if (!IsWin64Prologue && !IsFunclet) {
772 // Update EBP with the new base value.
773 BuildMI(MBB, MBBI, DL,
774 TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
777 .setMIFlag(MachineInstr::FrameSetup);
780 // Mark effective beginning of when frame pointer becomes valid.
781 // Define the current CFA to use the EBP/RBP register.
782 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
783 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
784 nullptr, DwarfFramePtr));
788 // Mark the FramePtr as live-in in every block.
789 for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
790 I->addLiveIn(MachineFramePtr);
792 assert(!IsFunclet && "funclets without FPs not yet implemented");
793 NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
796 // For EH funclets, only allocate enough space for outgoing calls. Save the
797 // NumBytes value that we would've used for the parent frame.
798 unsigned ParentFrameNumBytes = NumBytes;
800 NumBytes = getWinEHFuncletFrameSize(MF);
802 // Skip the callee-saved push instructions.
803 bool PushedRegs = false;
804 int StackOffset = 2 * stackGrowth;
806 while (MBBI != MBB.end() &&
807 MBBI->getFlag(MachineInstr::FrameSetup) &&
808 (MBBI->getOpcode() == X86::PUSH32r ||
809 MBBI->getOpcode() == X86::PUSH64r)) {
811 unsigned Reg = MBBI->getOperand(0).getReg();
814 if (!HasFP && NeedsDwarfCFI) {
815 // Mark callee-saved push instruction.
816 // Define the current CFA rule to use the provided offset.
818 BuildCFI(MBB, MBBI, DL,
819 MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
820 StackOffset += stackGrowth;
824 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
825 MachineInstr::FrameSetup);
829 // Realign stack after we pushed callee-saved registers (so that we'll be
830 // able to calculate their offsets from the frame pointer).
831 // Don't do this for Win64; it needs to realign the stack after the prologue.
832 if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
833 assert(HasFP && "There should be a frame pointer if stack is realigned.");
834 BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
837 // If there is a SUB32ri of ESP immediately before this instruction, merge
838 // the two. This can be the case when tail call elimination is enabled and
839 // the callee has more arguments than the caller.
840 NumBytes -= mergeSPUpdates(MBB, MBBI, true);
842 // Adjust stack pointer: ESP -= numbytes.
844 // Windows and cygwin/mingw require a prologue helper routine when allocating
845 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
846 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
847 // stack and adjust the stack pointer in one go. The 64-bit version of
848 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
849 // responsible for adjusting the stack pointer. Touching the stack at 4K
850 // increments is necessary to ensure that the guard pages used by the OS
851 // virtual memory manager are allocated in correct sequence.
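// For example, on MSVC x64 a 100 KiB frame is allocated roughly as
//   mov $102400, %eax
//   call __chkstk
//   sub %rax, %rsp
// rather than as a single SUB, so every page of the new stack area gets
// touched in order.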
852 uint64_t AlignedNumBytes = NumBytes;
853 if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
854 AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
855 if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
856 // Check whether EAX is livein for this function.
857 bool isEAXAlive = isEAXLiveIn(MF);
860 // Sanity check that EAX is not livein for this function.
861 // It should not be, so throw an assert.
862 assert(!Is64Bit && "EAX is livein in x64 case!");
865 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
866 .addReg(X86::EAX, RegState::Kill)
867 .setMIFlag(MachineInstr::FrameSetup);
871 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
872 // Function prologue is responsible for adjusting the stack pointer.
873 if (isUInt<32>(NumBytes)) {
874 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
876 .setMIFlag(MachineInstr::FrameSetup);
877 } else if (isInt<32>(NumBytes)) {
878 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
880 .setMIFlag(MachineInstr::FrameSetup);
882 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
884 .setMIFlag(MachineInstr::FrameSetup);
887 // Allocate NumBytes-4 bytes on the stack when EAX is live.
888 // We'll also use the 4 already allocated bytes for EAX.
889 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
890 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
891 .setMIFlag(MachineInstr::FrameSetup);
894 // Save a pointer to the MI where we set AX.
895 MachineBasicBlock::iterator SetRAX = MBBI;
898 // Call __chkstk, __chkstk_ms, or __alloca.
899 emitStackProbeCall(MF, MBB, MBBI, DL);
901 // Apply the frame setup flag to all inserted instrs.
902 for (; SetRAX != MBBI; ++SetRAX)
903 SetRAX->setFlag(MachineInstr::FrameSetup);
907 MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
909 StackPtr, false, NumBytes - 4);
910 MI->setFlag(MachineInstr::FrameSetup);
911 MBB.insert(MBBI, MI);
913 } else if (NumBytes) {
914 emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
917 if (NeedsWinCFI && NumBytes)
918 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
920 .setMIFlag(MachineInstr::FrameSetup);
922 int SEHFrameOffset = 0;
923 if (IsWin64Prologue && HasFP) {
924 // Set RBP to a small fixed offset from RSP. In the funclet case, we base
925 // this calculation on the incoming establisher, which holds the value of
926 // RSP from the parent frame at the end of the prologue.
927 unsigned SPOrEstablisher = IsFunclet ? Establisher : StackPtr;
928 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
930 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
931 SPOrEstablisher, false, SEHFrameOffset);
933 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
934 .addReg(SPOrEstablisher);
936 // If this is not a funclet, emit the CFI describing our frame pointer.
937 if (NeedsWinCFI && !IsFunclet)
938 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
940 .addImm(SEHFrameOffset)
941 .setMIFlag(MachineInstr::FrameSetup);
942 } else if (IsFunclet && STI.is32Bit()) {
943 // Reset EBP / ESI to something good for funclets.
944 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
947 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
948 const MachineInstr *FrameInstr = &*MBBI;
953 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
954 if (X86::FR64RegClass.contains(Reg)) {
955 unsigned IgnoredFrameReg;
956 int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
957 Offset += SEHFrameOffset;
959 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
962 .setMIFlag(MachineInstr::FrameSetup);
969 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
970 .setMIFlag(MachineInstr::FrameSetup);
972 // Realign stack after we spilled callee-saved registers (so that we'll be
973 // able to calculate their offsets from the frame pointer).
974 // Win64 requires aligning the stack after the prologue.
975 if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
976 assert(HasFP && "There should be a frame pointer if stack is realigned.");
977 BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
980 // If we need a base pointer, set it up here. It's whatever the value
981 // of the stack pointer is at this point. Any variable size objects
982 // will be allocated after this, so we can still use the base pointer
983 // to reference locals.
984 if (TRI->hasBasePointer(MF)) {
985 // Update the base pointer with the current stack pointer.
986 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
987 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
989 .setMIFlag(MachineInstr::FrameSetup);
990 if (X86FI->getRestoreBasePointer()) {
991 // Stash value of base pointer. Saving RSP instead of EBP shortens
992 // dependence chain. Used by SjLj EH.
993 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
994 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
995 FramePtr, true, X86FI->getRestoreBasePointerOffset())
997 .setMIFlag(MachineInstr::FrameSetup);
1000 if (X86FI->getHasSEHFramePtrSave()) {
1001 // Stash the value of the frame pointer relative to the base pointer for
1002 // Win32 EH, which does the inverse of the above: it recovers the frame
1003 // pointer from the base pointer rather than the other way around.
1005 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1008 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
1009 assert(UsedReg == BasePtr);
1010 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1012 .setMIFlag(MachineInstr::FrameSetup);
1016 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1017 // Mark end of stack pointer adjustment.
1018 if (!HasFP && NumBytes) {
1019 // Define the current CFA rule to use the provided offset.
1021 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
1022 nullptr, -StackSize + stackGrowth));
1025 // Emit DWARF info specifying the offsets of the callee-saved registers.
1027 emitCalleeSavedFrameMoves(MBB, MBBI, DL);
1031 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1032 const MachineFunction &MF) const {
1033 // We can't use LEA instructions for adjusting the stack pointer if this is a
1034 // leaf function in the Win64 ABI. Only ADD instructions may be used to
1035 // deallocate the stack.
1036 // This means that we can use LEA for SP in two situations:
1037 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1038 // 2. We *have* a frame pointer which means we are permitted to use LEA.
1039 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
1042 static bool isFuncletReturnInstr(MachineInstr *MI) {
1043 switch (MI->getOpcode()) {
1045 case X86::CLEANUPRET:
1050 llvm_unreachable("impossible");
1053 unsigned X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1054 // This is the size of the pushed CSRs.
1056 MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
1057 // This is the amount of stack a funclet needs to allocate.
1058 unsigned MaxCallSize = MF.getFrameInfo()->getMaxCallFrameSize();
1059 // RBP is not included in the callee saved register block. After pushing RBP,
1060 // everything is 16 byte aligned. Everything we allocate before an outgoing
1061 // call must also be 16 byte aligned.
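// For example, with 40 bytes of pushed CSRs and a 32-byte maximum call frame
// (assuming the usual 16-byte stack alignment), each funclet allocates
// RoundUpToAlignment(72, 16) - 40 = 40 bytes.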
1062 unsigned FrameSizeMinusRBP =
1063 RoundUpToAlignment(CSSize + MaxCallSize, getStackAlignment());
1064 // Subtract out the size of the callee saved registers. This is how much stack
1065 // each funclet will allocate.
1066 return FrameSizeMinusRBP - CSSize;
1069 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1070 MachineBasicBlock &MBB) const {
1071 const MachineFrameInfo *MFI = MF.getFrameInfo();
1072 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1073 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1075 if (MBBI != MBB.end())
1076 DL = MBBI->getDebugLoc();
1077 // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit ones.
1078 const bool Is64BitILP32 = STI.isTarget64BitILP32();
1079 unsigned FramePtr = TRI->getFrameRegister(MF);
1080 unsigned MachineFramePtr =
1081 Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
1084 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1086 IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
1087 bool IsFunclet = isFuncletReturnInstr(MBBI);
1088 MachineBasicBlock *RestoreMBB = nullptr;
1090 // Get the number of bytes to allocate from the FrameInfo.
1091 uint64_t StackSize = MFI->getStackSize();
1092 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1093 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1094 uint64_t NumBytes = 0;
1096 if (MBBI->getOpcode() == X86::CATCHRET) {
1097 NumBytes = getWinEHFuncletFrameSize(MF);
1098 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1099 MachineBasicBlock *TargetMBB = MBBI->getOperand(0).getMBB();
1101 // If this is SEH, this isn't really a funclet return.
1102 bool IsSEH = isAsynchronousEHPersonality(
1103 classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
1106 restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/true);
1107 BuildMI(MBB, MBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
1108 MBBI->eraseFromParent();
1112 // For 32-bit, create a new block for the restore code.
1113 RestoreMBB = TargetMBB;
1114 if (STI.is32Bit()) {
1115 RestoreMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
1116 MF.insert(TargetMBB->getIterator(), RestoreMBB);
1117 MBB.removeSuccessor(TargetMBB);
1118 MBB.addSuccessor(RestoreMBB);
1119 RestoreMBB->addSuccessor(TargetMBB);
1120 MBBI->getOperand(0).setMBB(RestoreMBB);
1124 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1126 .setMIFlag(MachineInstr::FrameDestroy);
1128 // Insert frame restoration code in a new block.
1129 if (STI.is32Bit()) {
1130 auto RestoreMBBI = RestoreMBB->begin();
1131 restoreWin32EHStackPointers(*RestoreMBB, RestoreMBBI, DL,
1132 /*RestoreSP=*/true);
1133 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4))
1136 } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
1137 NumBytes = getWinEHFuncletFrameSize(MF);
1138 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1139 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1141 .setMIFlag(MachineInstr::FrameDestroy);
1142 } else if (hasFP(MF)) {
1143 // Calculate required stack adjustment.
1144 uint64_t FrameSize = StackSize - SlotSize;
1145 NumBytes = FrameSize - CSSize;
1147 // Callee-saved registers were pushed on the stack before the stack was realigned.
1149 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1150 NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
1153 BuildMI(MBB, MBBI, DL,
1154 TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr)
1155 .setMIFlag(MachineInstr::FrameDestroy);
1157 NumBytes = StackSize - CSSize;
1159 uint64_t SEHStackAllocAmt = NumBytes;
1161 // Skip the callee-saved pop instructions.
1162 while (MBBI != MBB.begin()) {
1163 MachineBasicBlock::iterator PI = std::prev(MBBI);
1164 unsigned Opc = PI->getOpcode();
1166 if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1167 (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1168 Opc != X86::DBG_VALUE && !PI->isTerminator())
1173 MachineBasicBlock::iterator FirstCSPop = MBBI;
1176 // Fill EAX/RAX with the address of the target block.
1177 unsigned ReturnReg = STI.is64Bit() ? X86::RAX : X86::EAX;
1178 if (STI.is64Bit()) {
1179 // LEA64r RestoreMBB(%rip), %rax
1180 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::LEA64r), ReturnReg)
1187 // MOV32ri $RestoreMBB, %eax
1188 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::MOV32ri))
1190 .addMBB(RestoreMBB);
1192 // Record that we've taken the address of RestoreMBB and no longer just
1193 // reference it in a terminator.
1194 RestoreMBB->setHasAddressTaken();
1197 if (MBBI != MBB.end())
1198 DL = MBBI->getDebugLoc();
1200 // If there is an ADD32ri or SUB32ri of ESP immediately before this
1201 // instruction, merge the two instructions.
1202 if (NumBytes || MFI->hasVarSizedObjects())
1203 NumBytes += mergeSPUpdates(MBB, MBBI, true);
1205 // If dynamic alloca is used, then reset ESP to point to the last callee-saved
1206 // slot before popping them off! The same applies when the stack was
1207 // realigned. Don't do this if this was a funclet epilogue, since the funclets
1208 // will not do realignment or dynamic stack allocation.
1209 if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
1211 if (TRI->needsStackRealignment(MF))
1213 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
1214 uint64_t LEAAmount =
1215 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
1217 // There are only two legal forms of epilogue:
1218 // - add SEHAllocationSize, %rsp
1219 // - lea SEHAllocationSize(%FramePtr), %rsp
1221 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
1222 // However, we may use this sequence if we have a frame pointer because the
1223 // effects of the prologue can safely be undone.
1224 if (LEAAmount != 0) {
1225 unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
1226 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
1227 FramePtr, false, LEAAmount);
1230 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
1231 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
1235 } else if (NumBytes) {
1236 // Adjust stack pointer back: ESP += numbytes.
1237 emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
1241 // The Windows unwinder will not invoke a function's exception handler if the
1242 // IP is in the prologue or the epilogue. This behavior causes a problem when a
1243 // call immediately precedes an epilogue, because the return address points
1244 // into the epilogue. To cope with that, we insert an epilogue marker here,
1245 // then replace it with a 'nop' if it ends up immediately after a CALL in the
1246 // final emitted code.
1248 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
1250 // Add the return addr area delta back since we are not tail calling.
1251 int Offset = -1 * X86FI->getTCReturnAddrDelta();
1252 assert(Offset >= 0 && "TCDelta should never be positive");
1254 MBBI = MBB.getFirstTerminator();
1256 // Check for possible merge with preceding ADD instruction.
1257 Offset += mergeSPUpdates(MBB, MBBI, true);
1258 emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
1262 // NOTE: this only has a subset of the full frame index logic. In
1263 // particular, the FI < 0 and AfterFPPop logic is handled in
1264 // X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
1265 // (probably?) it should be moved into here.
1266 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1267 unsigned &FrameReg) const {
1268 const MachineFrameInfo *MFI = MF.getFrameInfo();
1270 // We can't calculate offset from frame pointer if the stack is realigned,
1271 // so enforce usage of stack/base pointer. The base pointer is used when we
1272 // have dynamic allocas in addition to dynamic realignment.
1273 if (TRI->hasBasePointer(MF))
1274 FrameReg = TRI->getBaseRegister();
1275 else if (TRI->needsStackRealignment(MF))
1276 FrameReg = TRI->getStackRegister();
1278 FrameReg = TRI->getFrameRegister(MF);
1280 // Offset will hold the offset from the stack pointer at function entry to the object.
1282 // We need to factor in additional offsets applied during the prologue to the
1283 // frame, base, and stack pointer depending on which is used.
1284 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1285 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1286 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1287 uint64_t StackSize = MFI->getStackSize();
1288 bool HasFP = hasFP(MF);
1289 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1290 int64_t FPDelta = 0;
1292 if (IsWin64Prologue) {
1293 assert(!MFI->hasCalls() || (StackSize % 16) == 8);
1295 // Calculate required stack adjustment.
1296 uint64_t FrameSize = StackSize - SlotSize;
1297 // If required, include space for extra hidden slot for stashing base pointer.
1298 if (X86FI->getRestoreBasePointer())
1299 FrameSize += SlotSize;
1300 uint64_t NumBytes = FrameSize - CSSize;
1302 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
1303 if (FI && FI == X86FI->getFAIndex())
1304 return -SEHFrameOffset;
1306 // FPDelta is the offset from the "traditional" FP location of the old base
1307 // pointer followed by return address and the location required by the
1308 // restricted Win64 prologue.
1309 // Add FPDelta to all offsets below that go through the frame pointer.
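// For example, a 264-byte frame with no CSR spills gives FrameSize = 256 and
// SEHFrameOffset = 128, so FPDelta = 128 and frame-pointer-relative offsets
// below are shifted by that amount.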
1310 FPDelta = FrameSize - SEHFrameOffset;
1311 assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
1312 "FPDelta isn't aligned per the Win64 ABI!");
1316 if (TRI->hasBasePointer(MF)) {
1317 assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
1319 // Skip the saved EBP.
1320 return Offset + SlotSize + FPDelta;
1322 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1323 return Offset + StackSize;
1325 } else if (TRI->needsStackRealignment(MF)) {
1327 // Skip the saved EBP.
1328 return Offset + SlotSize + FPDelta;
1330 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1331 return Offset + StackSize;
1333 // FIXME: Support tail calls
1336 return Offset + StackSize;
1338 // Skip the saved EBP.
1341 // Skip the RETADDR move area
1342 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1343 if (TailCallReturnAddrDelta < 0)
1344 Offset -= TailCallReturnAddrDelta;
1347 return Offset + FPDelta;
1350 // Simplified from getFrameIndexReference keeping only StackPointer cases
1351 int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
1353 unsigned &FrameReg) const {
1354 const MachineFrameInfo *MFI = MF.getFrameInfo();
1355 // Does not include any dynamic realign.
1356 const uint64_t StackSize = MFI->getStackSize();
1359 // Note: LLVM arranges the stack as:
1360 // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
1361 // > "Stack Slots" (<--SP)
1362 // We can always address StackSlots from RSP. We can usually (unless
1363 // needsStackRealignment) address CSRs from RSP, but sometimes need to
1364 // address them from RBP. FixedObjects can be placed anywhere in the stack
1365 // frame depending on their specific requirements (i.e. we can actually
1366 // refer to arguments to the function which are stored in the *callers*
1367 // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
1368 // AND FixedObjects IFF needsStackRealignment or hasVarSizedObject.
1370 assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
1372 // We don't handle tail calls, and shouldn't be seeing them
1374 int TailCallReturnAddrDelta =
1375 MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
1376 assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
1380 // Fill in FrameReg output argument.
1381 FrameReg = TRI->getStackRegister();
1383 // This is how the math works out:
1385 // %rsp grows (i.e. gets lower) left to right. Each box below is
1386 // one word (eight bytes). Obj0 is the stack slot we're trying to
1389 // ----------------------------------
1390 // | BP | Obj0 | Obj1 | ... | ObjN |
1391 // ----------------------------------
1395 // A is the incoming stack pointer.
1396 // (B - A) is the local area offset (-8 for x86-64) [1]
1397 // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
1399 // |(E - B)| is the StackSize (absolute value, positive). For a
1400 // stack that grows down, this works out to be (B - E). [3]
1402 // E is also the value of %rsp after stack has been set up, and we
1403 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
1404 // (C - E) == (C - A) - (B - A) + (B - E)
1405 // { Using [1], [2] and [3] above }
1406 // == getObjectOffset - LocalAreaOffset + StackSize
1409 // Get the Offset from the StackPointer
1410 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1412 return Offset + StackSize;
1415 bool X86FrameLowering::assignCalleeSavedSpillSlots(
1416 MachineFunction &MF, const TargetRegisterInfo *TRI,
1417 std::vector<CalleeSavedInfo> &CSI) const {
1418 MachineFrameInfo *MFI = MF.getFrameInfo();
1419 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1421 unsigned CalleeSavedFrameSize = 0;
1422 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
1425 // emitPrologue always spills the frame register first.
1426 SpillSlotOffset -= SlotSize;
1427 MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1429 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
1430 // the frame register, we can delete it from the CSI list and not have to worry
1431 // about avoiding it later.
1432 unsigned FPReg = TRI->getFrameRegister(MF);
1433 for (unsigned i = 0; i < CSI.size(); ++i) {
1434 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
1435 CSI.erase(CSI.begin() + i);
1441 // Assign slots for GPRs. It increases frame size.
1442 for (unsigned i = CSI.size(); i != 0; --i) {
1443 unsigned Reg = CSI[i - 1].getReg();
1445 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1448 SpillSlotOffset -= SlotSize;
1449 CalleeSavedFrameSize += SlotSize;
1451 int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1452 CSI[i - 1].setFrameIdx(SlotIndex);
1455 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
1457 // Assign slots for XMMs.
1458 for (unsigned i = CSI.size(); i != 0; --i) {
1459 unsigned Reg = CSI[i - 1].getReg();
1460 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1463 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1465 SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
1467 SpillSlotOffset -= RC->getSize();
1469 MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
1470 CSI[i - 1].setFrameIdx(SlotIndex);
1471 MFI->ensureMaxAlignment(RC->getAlignment());
1477 bool X86FrameLowering::spillCalleeSavedRegisters(
1478 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1479 const std::vector<CalleeSavedInfo> &CSI,
1480 const TargetRegisterInfo *TRI) const {
1481 DebugLoc DL = MBB.findDebugLoc(MI);
1483 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
1484 // for us, and there are no XMM CSRs on Win32.
1485 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
1488 // Push GPRs. It increases frame size.
1489 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1490 for (unsigned i = CSI.size(); i != 0; --i) {
1491 unsigned Reg = CSI[i - 1].getReg();
1493 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1495 // Add the callee-saved register as live-in. It's killed at the spill.
1498 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1499 .setMIFlag(MachineInstr::FrameSetup);
1502 // Spill XMM regs. X86 does not have the ability to push/pop XMM registers,
1503 // so spill them to the stack frame instead.
1504 for (unsigned i = CSI.size(); i != 0; --i) {
1505 unsigned Reg = CSI[i-1].getReg();
1506 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1508 // Add the callee-saved register as live-in. It's killed at the spill.
1510 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1512 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
1515 MI->setFlag(MachineInstr::FrameSetup);
1522 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1523 MachineBasicBlock::iterator MI,
1524 const std::vector<CalleeSavedInfo> &CSI,
1525 const TargetRegisterInfo *TRI) const {
1529 if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
1530 // Don't restore CSRs in 32-bit EH funclets. Matches
1531 // spillCalleeSavedRegisters.
1534 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
1535 // funclets. emitEpilogue transforms these to normal jumps.
1536 if (MI->getOpcode() == X86::CATCHRET) {
1537 const Function *Func = MBB.getParent()->getFunction();
1538 bool IsSEH = isAsynchronousEHPersonality(
1539 classifyEHPersonality(Func->getPersonalityFn()));
1545 DebugLoc DL = MBB.findDebugLoc(MI);
1547 // Reload XMMs from stack frame.
1548 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1549 unsigned Reg = CSI[i].getReg();
1550 if (X86::GR64RegClass.contains(Reg) ||
1551 X86::GR32RegClass.contains(Reg))
1554 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1555 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
1559 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1560 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1561 unsigned Reg = CSI[i].getReg();
1562 if (!X86::GR64RegClass.contains(Reg) &&
1563 !X86::GR32RegClass.contains(Reg))
1566 BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
1567 .setMIFlag(MachineInstr::FrameDestroy);
1572 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
1573 BitVector &SavedRegs,
1574 RegScavenger *RS) const {
1575 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1577 MachineFrameInfo *MFI = MF.getFrameInfo();
1579 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1580 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1582 if (TailCallReturnAddrDelta < 0) {
1583 // create RETURNADDR area
1592 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1593 TailCallReturnAddrDelta - SlotSize, true);
1596 // Spill the BasePtr if it's used.
1597 if (TRI->hasBasePointer(MF)) {
1598 SavedRegs.set(TRI->getBaseRegister());
1600 // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
1601 if (MF.getMMI().hasEHFunclets()) {
1602 int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
1603 X86FI->setHasSEHFramePtrSave(true);
1604 X86FI->setSEHFramePtrSaveIndex(FI);
1610 HasNestArgument(const MachineFunction *MF) {
1611 const Function *F = MF->getFunction();
1612 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1614 if (I->hasNestAttr())
1620 /// GetScratchRegister - Get a temp register for performing work in the
1621 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
1622 /// and the properties of the function either one or two registers will be
1623 /// needed. Set primary to true for the first register, false for the second.
1625 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
1626 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1629 if (CallingConvention == CallingConv::HiPE) {
1631 return Primary ? X86::R14 : X86::R13;
1633 return Primary ? X86::EBX : X86::EDI;
1638 return Primary ? X86::R11 : X86::R12;
1640 return Primary ? X86::R11D : X86::R12D;
1643 bool IsNested = HasNestArgument(&MF);
1645 if (CallingConvention == CallingConv::X86_FastCall ||
1646 CallingConvention == CallingConv::Fast) {
1648 report_fatal_error("Segmented stacks do not support fastcall with "
1649 "nested functions.");
1650 return Primary ? X86::EAX : X86::ECX;
1653 return Primary ? X86::EDX : X86::EAX;
1654 return Primary ? X86::ECX : X86::EAX;
1657 // The stack limit in the TCB is set to this many bytes above the actual stack limit.
1659 static const uint64_t kSplitStackAvailable = 256;
1661 void X86FrameLowering::adjustForSegmentedStacks(
1662 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
1663 MachineFrameInfo *MFI = MF.getFrameInfo();
1665 unsigned TlsReg, TlsOffset;
1668 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1669 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1670 "Scratch register is live-in");
1672 if (MF.getFunction()->isVarArg())
1673 report_fatal_error("Segmented stacks do not support vararg functions.");
1674 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
1675 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
1676 !STI.isTargetDragonFly())
1677 report_fatal_error("Segmented stacks not supported on this platform.");
1679 // Eventually StackSize will be calculated by a link-time pass, which will
1680 // also decide whether checking code needs to be injected into this particular prologue.
1682 StackSize = MFI->getStackSize();
1684 // Do not generate a prologue for functions with a stack of size zero
1688 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
1689 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
1690 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1691 bool IsNested = false;
1693 // We need to know if the function has a nest argument only in 64 bit mode.
1695 IsNested = HasNestArgument(&MF);
1697 // The MOV R10, RAX needs to be in a different block, since the RET we emit in
1698 // allocMBB needs to be the last (terminating) instruction.
1700 for (const auto &LI : PrologueMBB.liveins()) {
1701 allocMBB->addLiveIn(LI);
1702 checkMBB->addLiveIn(LI);
1706 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
1708 MF.push_front(allocMBB);
1709 MF.push_front(checkMBB);
1711 // When the frame size is less than 256 we just compare the stack
1712 // boundary directly to the value of the stack pointer, per gcc.
1713 bool CompareStackPointer = StackSize < kSplitStackAvailable;
1715 // Read the limit of the current stacklet from the stack_guard location.
1717 if (STI.isTargetLinux()) {
1719 TlsOffset = IsLP64 ? 0x70 : 0x40;
1720 } else if (STI.isTargetDarwin()) {
1722 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
1723 } else if (STI.isTargetWin64()) {
1725 TlsOffset = 0x28; // pvArbitrary, reserved for application use
1726 } else if (STI.isTargetFreeBSD()) {
1729 } else if (STI.isTargetDragonFly()) {
1731 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
1733 report_fatal_error("Segmented stacks not supported on this platform.");
1736 if (CompareStackPointer)
1737 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
1739 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
1740 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1742 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
1743 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
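    // Illustrative sketch (assumed AT&T form, not from the original source):
    // on Linux/LP64 the two instructions built above come out roughly as
    //   lea  -StackSize(%rsp), %r11
    //   cmp  %fs:0x70, %r11
    // with the LEA omitted (and %rsp compared directly) when
    // CompareStackPointer is set.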
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x10; // use tls_tcb.tcb_segstack
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {
      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS
        // offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }
  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);
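  // When the JA above is not taken, execution continues in allocMBB, which
  // the push_front calls above placed immediately after checkMBB; that block
  // passes the sizes to __morestack and calls it.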
  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack.

    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);

    BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
      .addImm(X86FI->getArgumentStackSize());
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }
  // __morestack is in libgcc.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // Under the large code model, we cannot assume that __morestack lives
    // within 2^31 bytes of the call site, so we cannot use pc-relative
    // addressing. We cannot perform the call via a temporary register,
    // as the rax register may be used to store the static chain, and all
    // other suitable registers may be either callee-save or used for
    // parameter passing. We cannot use the stack at this point either
    // because __morestack manipulates the stack directly.
    //
    // To avoid these issues, perform an indirect call via a read-only memory
    // location containing the address.
    //
    // This solution is not perfect, as it assumes that the .rodata section
    // is laid out within 2^31 bytes of each function body, but this seems
    // to be sufficient for JIT.
    BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addExternalSymbol("__morestack_addr")
        .addReg(0);
    MF.getMMI().setUsesMorestackAddr(true);
  } else {
    if (Is64Bit)
      BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack");
    else
      BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("__morestack");
  }

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
  allocMBB->addSuccessor(&PrologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&PrologueMBB);
}
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of a hybrid stack/heap
/// architecture. (For more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");
  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
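  //
  // Illustrative example (numbers chosen for illustration only): with
  // SlotSize == 8, a 40-byte frame and 8 formal arguments, CallerStkArity is
  // 8 - 6 == 2, so MaxStack above starts at 40 + 2*8 + 8 == 64 bytes before
  // the callee scan below can raise it further.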
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }
  // If the needed stack frame is larger than the guaranteed size, runtime
  // checks and calls to the "inc_stack_0" BIF are inserted into the assembly
  // prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (const auto &LI : PrologueMBB.liveins()) {
      stackCheckMBB->addLiveIn(LI);
      incStackMBB->addLiveIn(LI);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);
    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }
    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);
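    // Illustrative sketch (assumed AT&T form, not from the original source):
    // on x86-64 the stack check built above is roughly
    //   lea  -MaxStack(%rsp), %r14
    //   cmp  0x90(%rbp), %r14      # SP_LIMIT(P) is at SPLimitOffset(PReg)
    //   jae  <function body>
    // and otherwise falls through into incStackMBB below, which calls
    // inc_stack_0 and repeats the check until enough stack is available.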
    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop))
      .addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&PrologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&PrologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           DebugLoc DL, int Offset) const {
  if (Offset <= 0)
    return false;

  if (Offset % SlotSize)
    return false;

  int NumPops = Offset / SlotSize;
  // This is only worth it if we have at most 2 pops.
  if (NumPops != 1 && NumPops != 2)
    return false;

  // Handle only the trivial case where the adjustment directly follows
  // a call. This is the most common one, anyway.
  if (MBBI == MBB.begin())
    return false;
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
    return false;

  unsigned Regs[2];
  unsigned FoundRegs = 0;
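  // Illustrative example (not from the original source): when optimizing for
  // minimum size, a post-call "add $8, %esp" on a 32-bit target can become
  //   call foo
  //   pop  %ecx
  //   pop  %edx
  // using registers the call already clobbered, which is smaller than the ADD.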
  auto RegMask = Prev->getOperand(1);
  auto &RegClass =
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  // Try to find up to NumPops free registers.
  for (auto Candidate : RegClass) {
    // Poor man's liveness:
    // Since we're immediately after a call, any register that is clobbered
    // by the call and not defined by it can be considered dead.
    if (!RegMask.clobbersPhysReg(Candidate))
      continue;

    bool IsDef = false;
    for (const MachineOperand &MO : Prev->implicit_operands()) {
      if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
        IsDef = true;
        break;
      }
    }
    if (IsDef)
      continue;

    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
      break;
  }
  // If no free register was found, we cannot replace the adjustment with pops.
  if (FoundRegs == 0)
    return false;

  // If we found only one free register, but need two, reuse the same one twice.
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];

  for (int i = 0; i < NumPops; ++i)
    BuildMI(MBB, MBBI, DL,
            TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);

  return true;
}
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
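  // Note (inferred from the operand uses here, not stated in the source):
  // operand 0 of the call-frame pseudos carries the full adjustment for the
  // call, while operand 1 carries the portion already handled inside the call
  // sequence (argument pushes on setup, callee-popped bytes on destroy).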
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    //
    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = getStackAlignment();
    Amount = RoundUpToAlignment(Amount, StackAlign);

    MachineModuleInfo &MMI = MF.getMMI();
    const Function *Fn = MF.getFunction();
    bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    bool DwarfCFI = !WindowsCFI &&
                    (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
    // If we have any exception handlers in this function, and we adjust
    // the SP before calls, we may need to indicate this to the unwinder
    // using GNU_ARGS_SIZE. Note that this may be necessary even when
    // Amount == 0, because the preceding function may have set a non-0
    // GNU_ARGS_SIZE.
    // TODO: We don't need to reset this between subsequent functions,
    // if it didn't change.
    bool HasDwarfEHHandlers = !WindowsCFI &&
                              !MF.getMMI().getLandingPads().empty();

    if (HasDwarfEHHandlers && !isDestroy &&
        MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
      BuildCFI(MBB, I, DL,
               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
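    // Note (an assumption about the CFI lowering, not stated here):
    // createGnuArgsSize corresponds to DW_CFA_GNU_args_size, emitted as
    // ".cfi_gnu_args_size <Amount>" in textual assembly, which tells the
    // unwinder how many bytes of outgoing arguments to discard when unwinding
    // through this call site.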
    // Factor out the amount that gets handled inside the sequence
    // (pushes of arguments for frame setup, callee pops for frame destroy).
    Amount -= InternalAmt;

    // If this is a callee-pop calling convention, and we're emitting precise
    // SP-based CFI, emit a CFA adjust for the amount the callee popped.
    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF) &&
        MMI.usePreciseUnwindInfo())
      BuildCFI(MBB, I, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));

    // Add Amount to SP to destroy a frame, and subtract to setup.
    int Offset = isDestroy ? Amount : -Amount;

    if (!(Fn->optForMinSize() &&
          adjustStackWithPops(MBB, I, DL, Offset)))
      BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
    if (DwarfCFI && !hasFP(MF)) {
      // If we don't have FP, but need to generate unwind information,
      // we need to set the correct CFA offset after the stack adjustment.
      // How much we adjust the CFA offset depends on whether we're emitting
      // CFI only for EH purposes or for debugging. EH only requires the CFA
      // offset to be correct at each call site, while for debugging we want
      // it to be more precise.
      int CFAOffset = Amount;
      if (!MMI.usePreciseUnwindInfo())
        CFAOffset += InternalAmt;
      CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
      BuildCFI(MBB, I, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
    }

    return;
  }
  if (isDestroy && InternalAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
  }
}
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");

  // Win64 has strict requirements in terms of epilogues and we are
  // not taking a chance at messing with them.
  // I.e., unless this block is already an exit block, we can't use
  // it as an epilogue.
  if (MBB.getParent()->getSubtarget<X86Subtarget>().isTargetWin64() &&
      !MBB.succ_empty() && !MBB.isReturnBlock())
    return false;

  if (canUseLEAForSPInEpilogue(*MBB.getParent()))
    return true;

  // If we cannot use LEA to adjust SP, we may need to use ADD, which
  // clobbers EFLAGS. Check that none of the terminators reads EFLAGS;
  // if one uses it, conservatively assume it is not safe to insert the
  // epilogue here.
  return !terminatorsNeedFlagsAsInput(MBB);
}
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    bool RestoreSP) const {
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
  assert(STI.is32Bit() && !Uses64BitFramePtr &&
         "restoring EBP/ESI on non-32-bit target");

  MachineFunction &MF = *MBB.getParent();
  unsigned FramePtr = TRI->getFrameRegister(MF);
  unsigned BasePtr = TRI->getBaseRegister();
  MachineModuleInfo &MMI = MF.getMMI();
  const Function *Fn = MF.getFunction();
  WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn);
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Don't set FrameSetup flag in catchret case.

  int FI = FuncInfo.EHRegNodeFrameIndex;
  int EHRegSize = MFI->getObjectSize(FI);
  if (RestoreSP) {
    // MOV32rm -EHRegSize(%ebp), %esp
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
                 X86::EBP, true, -EHRegSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  unsigned UsedReg;
  int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
  int EndOffset = -EHRegOffset - EHRegSize;
  FuncInfo.EHRegNodeEndOffset = EndOffset;
  if (UsedReg == FramePtr) {
    // ADD $offset, %ebp
    unsigned ADDri = getADDriOpcode(false, EndOffset);
    BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
        .addReg(FramePtr)
        .addImm(EndOffset)
        .setMIFlag(MachineInstr::FrameSetup)
        ->getOperand(3)
        .setIsDead();
    assert(EndOffset >= 0 &&
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    // LEA offset(%ebp), %esi
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
                 FramePtr, false, EndOffset)
        .setMIFlag(MachineInstr::FrameSetup);
    // MOV32rm SavedEBPOffset(%esi), %ebp
    assert(X86FI->getHasSEHFramePtrSave());
    int Offset =
        getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
    assert(UsedReg == BasePtr);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
                 UsedReg, true, Offset)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
  }

  return MBBI;
}
unsigned
X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
  // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
  unsigned Offset = 16;
  // RBP is immediately pushed.
  Offset += SlotSize;
  // All callee-saved registers are then pushed.
  Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // Every funclet allocates enough stack space for the largest outgoing call.
  Offset += getWinEHFuncletFrameSize(MF);
  return Offset;
}
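// Illustrative example for getWinEHParentFrameOffset above (numbers chosen
// for illustration only): with SlotSize == 8, 16 bytes of callee-saved
// registers and a 32-byte funclet frame, the resulting offset is
// 16 + 8 + 16 + 32 == 72 bytes.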