1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the X86 implementation of TargetFrameLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "X86FrameLowering.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/Analysis/LibCallSemantics.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/WinEHFuncInfo.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCSymbol.h"
32 #include "llvm/Target/TargetOptions.h"
33 #include "llvm/Support/Debug.h"
38 X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
39 unsigned StackAlignOverride)
40 : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
41 STI.is64Bit() ? -8 : -4),
42 STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
43 // Cache a bunch of frame-related predicates for this subtarget.
44 SlotSize = TRI->getSlotSize();
45 Is64Bit = STI.is64Bit();
46 IsLP64 = STI.isTarget64BitLP64();
47 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
48 Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
49 StackPtr = TRI->getStackRegister();
52 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
53 return !MF.getFrameInfo()->hasVarSizedObjects() &&
54 !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
57 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the
58 /// call frame pseudos can be simplified. Having a FP, as in the default
59 /// implementation, is not sufficient here since we can't always use it.
60 /// Use a more nuanced condition.
62 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
63 return hasReservedCallFrame(MF) ||
64 (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
65 TRI->hasBasePointer(MF);
68 // needsFrameIndexResolution - Do we need to perform FI resolution for
69 // this function. Normally, this is required only when the function
70 // has any stack objects. However, FI resolution actually has another job,
71 // not apparent from the title - it resolves callframesetup/destroy
72 // that were not simplified earlier.
73 // So, this is required for x86 functions that have push sequences even
74 // when there are no stack objects.
76 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
77 return MF.getFrameInfo()->hasStackObjects() ||
78 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
81 /// hasFP - Return true if the specified function should have a dedicated frame
82 /// pointer register. This is true if the function has variable sized allocas
83 /// or if frame pointer elimination is disabled.
84 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
85 const MachineFrameInfo *MFI = MF.getFrameInfo();
86 const MachineModuleInfo &MMI = MF.getMMI();
88 return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
89 TRI->needsStackRealignment(MF) ||
90 MFI->hasVarSizedObjects() ||
91 MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
92 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
93 MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
94 MFI->hasStackMap() || MFI->hasPatchPoint());
97 static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
100 return X86::SUB64ri8;
101 return X86::SUB64ri32;
104 return X86::SUB32ri8;
109 static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
112 return X86::ADD64ri8;
113 return X86::ADD64ri32;
116 return X86::ADD32ri8;
121 static unsigned getSUBrrOpcode(unsigned isLP64) {
122 return isLP64 ? X86::SUB64rr : X86::SUB32rr;
125 static unsigned getADDrrOpcode(unsigned isLP64) {
126 return isLP64 ? X86::ADD64rr : X86::ADD32rr;
129 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
132 return X86::AND64ri8;
133 return X86::AND64ri32;
136 return X86::AND32ri8;
140 static unsigned getLEArOpcode(unsigned IsLP64) {
141 return IsLP64 ? X86::LEA64r : X86::LEA32r;
144 /// findDeadCallerSavedReg - Return a caller-saved register that isn't live
145 /// when it reaches the "return" instruction. We can then pop a stack object
146 /// to this register without worry about clobbering it.
147 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
148 MachineBasicBlock::iterator &MBBI,
149 const TargetRegisterInfo *TRI,
151 const MachineFunction *MF = MBB.getParent();
152 const Function *F = MF->getFunction();
153 if (!F || MF->getMMI().callsEHReturn())
156 static const uint16_t CallerSavedRegs32Bit[] = {
157 X86::EAX, X86::EDX, X86::ECX, 0
160 static const uint16_t CallerSavedRegs64Bit[] = {
161 X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
162 X86::R8, X86::R9, X86::R10, X86::R11, 0
165 unsigned Opc = MBBI->getOpcode();
172 case X86::TCRETURNdi:
173 case X86::TCRETURNri:
174 case X86::TCRETURNmi:
175 case X86::TCRETURNdi64:
176 case X86::TCRETURNri64:
177 case X86::TCRETURNmi64:
179 case X86::EH_RETURN64: {
180 SmallSet<uint16_t, 8> Uses;
181 for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
182 MachineOperand &MO = MBBI->getOperand(i);
183 if (!MO.isReg() || MO.isDef())
185 unsigned Reg = MO.getReg();
188 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
192 const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
194 if (!Uses.count(*CS))
202 static bool isEAXLiveIn(MachineFunction &MF) {
203 for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
204 EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
205 unsigned Reg = II->first;
207 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
208 Reg == X86::AH || Reg == X86::AL)
215 /// Check whether or not the terminators of \p MBB needs to read EFLAGS.
216 static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
217 for (const MachineInstr &MI : MBB.terminators()) {
218 bool BreakNext = false;
219 for (const MachineOperand &MO : MI.operands()) {
222 unsigned Reg = MO.getReg();
223 if (Reg != X86::EFLAGS)
226 // This terminator needs an eflag that is not defined
227 // by a previous terminator.
238 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
239 /// stack pointer by a constant value.
240 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
241 MachineBasicBlock::iterator &MBBI,
242 int64_t NumBytes, bool InEpilogue) const {
243 bool isSub = NumBytes < 0;
244 uint64_t Offset = isSub ? -NumBytes : NumBytes;
246 uint64_t Chunk = (1LL << 31) - 1;
247 DebugLoc DL = MBB.findDebugLoc(MBBI);
250 if (Offset > Chunk) {
251 // Rather than emit a long series of instructions for large offsets,
252 // load the offset into a register and do one sub/add
255 if (isSub && !isEAXLiveIn(*MBB.getParent()))
256 Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
258 Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
261 unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
262 BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
265 ? getSUBrrOpcode(Is64Bit)
266 : getADDrrOpcode(Is64Bit);
267 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
270 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
276 uint64_t ThisVal = std::min(Offset, Chunk);
277 if (ThisVal == (Is64Bit ? 8 : 4)) {
278 // Use push / pop instead.
280 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
281 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
284 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
285 : (Is64Bit ? X86::POP64r : X86::POP32r);
286 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
287 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
289 MI->setFlag(MachineInstr::FrameSetup);
291 MI->setFlag(MachineInstr::FrameDestroy);
297 MachineInstrBuilder MI = BuildStackAdjustment(
298 MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
300 MI.setMIFlag(MachineInstr::FrameSetup);
302 MI.setMIFlag(MachineInstr::FrameDestroy);
308 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
309 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
310 int64_t Offset, bool InEpilogue) const {
311 assert(Offset != 0 && "zero offset stack adjustment requested");
313 // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
317 UseLEA = STI.useLeaForSP();
319 // If we can use LEA for SP but we shouldn't, check that none
320 // of the terminators uses the eflags. Otherwise we will insert
321 // a ADD that will redefine the eflags and break the condition.
322 // Alternatively, we could move the ADD, but this may not be possible
323 // and is an optimization anyway.
324 UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
325 if (UseLEA && !STI.useLeaForSP())
326 UseLEA = terminatorsNeedFlagsAsInput(MBB);
327 // If that assert breaks, that means we do not do the right thing
328 // in canUseAsEpilogue.
329 assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
330 "We shouldn't have allowed this insertion point");
333 MachineInstrBuilder MI;
335 MI = addRegOffset(BuildMI(MBB, MBBI, DL,
336 TII.get(getLEArOpcode(Uses64BitFramePtr)),
338 StackPtr, false, Offset);
340 bool IsSub = Offset < 0;
341 uint64_t AbsOffset = IsSub ? -Offset : Offset;
342 unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
343 : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
344 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
347 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
352 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
353 MachineBasicBlock::iterator &MBBI,
354 bool doMergeWithPrevious) const {
355 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
356 (!doMergeWithPrevious && MBBI == MBB.end()))
359 MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
360 MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
362 unsigned Opc = PI->getOpcode();
365 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
366 Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
367 Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
368 PI->getOperand(0).getReg() == StackPtr){
369 Offset += PI->getOperand(2).getImm();
371 if (!doMergeWithPrevious) MBBI = NI;
372 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
373 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
374 PI->getOperand(0).getReg() == StackPtr) {
375 Offset -= PI->getOperand(2).getImm();
377 if (!doMergeWithPrevious) MBBI = NI;
383 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
384 MachineBasicBlock::iterator MBBI, DebugLoc DL,
385 MCCFIInstruction CFIInst) const {
386 MachineFunction &MF = *MBB.getParent();
387 unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
388 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
389 .addCFIIndex(CFIIndex);
393 X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
394 MachineBasicBlock::iterator MBBI,
396 MachineFunction &MF = *MBB.getParent();
397 MachineFrameInfo *MFI = MF.getFrameInfo();
398 MachineModuleInfo &MMI = MF.getMMI();
399 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
401 // Add callee saved registers to move list.
402 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
403 if (CSI.empty()) return;
405 // Calculate offsets.
406 for (std::vector<CalleeSavedInfo>::const_iterator
407 I = CSI.begin(), E = CSI.end(); I != E; ++I) {
408 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
409 unsigned Reg = I->getReg();
411 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
412 BuildCFI(MBB, MBBI, DL,
413 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
417 /// usesTheStack - This function checks if any of the users of EFLAGS
418 /// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
419 /// to use the stack, and if we don't adjust the stack we clobber the first
421 /// See X86InstrInfo::copyPhysReg.
422 static bool usesTheStack(const MachineFunction &MF) {
423 const MachineRegisterInfo &MRI = MF.getRegInfo();
425 for (MachineRegisterInfo::reg_instr_iterator
426 ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
434 void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
435 MachineBasicBlock &MBB,
436 MachineBasicBlock::iterator MBBI,
438 bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
442 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
444 CallOp = X86::CALLpcrel32;
448 if (STI.isTargetCygMing()) {
449 Symbol = "___chkstk_ms";
453 } else if (STI.isTargetCygMing())
458 MachineInstrBuilder CI;
460 // All current stack probes take AX and SP as input, clobber flags, and
461 // preserve all registers. x86_64 probes leave RSP unmodified.
462 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
463 // For the large code model, we have to call through a register. Use R11,
464 // as it is scratch in all supported calling conventions.
465 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
466 .addExternalSymbol(Symbol);
467 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
469 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
472 unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
473 unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
474 CI.addReg(AX, RegState::Implicit)
475 .addReg(SP, RegState::Implicit)
476 .addReg(AX, RegState::Define | RegState::Implicit)
477 .addReg(SP, RegState::Define | RegState::Implicit)
478 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
481 // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
482 // themselves. It also does not clobber %rax so we can reuse it when
484 BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
490 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
491 // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
492 // and might require smaller successive adjustments.
493 const uint64_t Win64MaxSEHOffset = 128;
494 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
495 // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
496 return SEHFrameOffset & -16;
499 // If we're forcing a stack realignment we can't rely on just the frame
500 // info, we need to know the ABI stack alignment as well in case we
501 // have a call out. Otherwise just make sure we have some alignment - we'll
502 // go with the minimum SlotSize.
503 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
504 const MachineFrameInfo *MFI = MF.getFrameInfo();
505 uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
506 unsigned StackAlign = getStackAlignment();
507 if (MF.getFunction()->hasFnAttribute("stackrealign")) {
509 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
510 else if (MaxAlign < SlotSize)
516 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
517 MachineBasicBlock::iterator MBBI,
518 DebugLoc DL, unsigned Reg,
519 uint64_t MaxAlign) const {
520 uint64_t Val = -MaxAlign;
521 unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
522 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
525 .setMIFlag(MachineInstr::FrameSetup);
527 // The EFLAGS implicit def is dead.
528 MI->getOperand(3).setIsDead();
531 /// emitPrologue - Push callee-saved registers onto the stack, which
532 /// automatically adjust the stack pointer. Adjust the stack pointer to allocate
533 /// space for local variables. Also emit labels used by the exception handler to
534 /// generate the exception handling frames.
537 Here's a gist of what gets emitted:
539 ; Establish frame pointer, if needed
542 .cfi_def_cfa_offset 16
543 .cfi_offset %rbp, -16
546 .cfi_def_cfa_register %rbp
548 ; Spill general-purpose registers
549 [for all callee-saved GPRs]
552 .cfi_def_cfa_offset (offset from RETADDR)
555 ; If the required stack alignment > default stack alignment
556 ; rsp needs to be re-aligned. This creates a "re-alignment gap"
557 ; of unknown size in the stack frame.
558 [if stack needs re-alignment]
561 ; Allocate space for locals
562 [if target is Windows and allocated space > 4096 bytes]
563 ; Windows needs special care for allocations larger
566 call ___chkstk_ms/___chkstk
572 .seh_stackalloc (size of XMM spill slots)
573 .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
578 ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
579 ; they may get spilled on any platform, if the current function
580 ; calls @llvm.eh.unwind.init
582 [for all callee-saved XMM registers]
583 movaps %<xmm reg>, -MMM(%rbp)
584 [for all callee-saved XMM registers]
585 .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
586 ; i.e. the offset relative to (%rbp - SEHFrameOffset)
588 [for all callee-saved XMM registers]
589 movaps %<xmm reg>, KKK(%rsp)
590 [for all callee-saved XMM registers]
591 .seh_savexmm %<xmm reg>, KKK
595 [if needs base pointer]
597 [if needs to restore base pointer]
602 [for all callee-saved registers]
603 .cfi_offset %<reg>, (offset from %rbp)
605 .cfi_def_cfa_offset (offset from RETADDR)
606 [for all callee-saved registers]
607 .cfi_offset %<reg>, (offset from %rsp)
610 - .seh directives are emitted only for Windows 64 ABI
611 - .cfi directives are emitted for all other ABIs
612 - for 32-bit code, substitute %e?? registers for %r??
615 void X86FrameLowering::emitPrologue(MachineFunction &MF,
616 MachineBasicBlock &MBB) const {
617 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
618 "MF used frame lowering for wrong subtarget");
619 MachineBasicBlock::iterator MBBI = MBB.begin();
620 MachineFrameInfo *MFI = MF.getFrameInfo();
621 const Function *Fn = MF.getFunction();
622 MachineModuleInfo &MMI = MF.getMMI();
623 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
624 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
625 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
626 bool IsFunclet = MBB.isEHFuncletEntry();
629 classifyEHPersonality(Fn->getPersonalityFn()) == EHPersonality::CoreCLR;
630 bool HasFP = hasFP(MF);
631 bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
632 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
633 bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
635 !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
636 unsigned FramePtr = TRI->getFrameRegister(MF);
637 const unsigned MachineFramePtr =
638 STI.isTarget64BitILP32()
639 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
641 unsigned BasePtr = TRI->getBaseRegister();
643 // Debug location must be unknown since the first debug location is used
644 // to determine the end of the prologue.
647 // Add RETADDR move area to callee saved frame size.
648 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
649 if (TailCallReturnAddrDelta && IsWin64Prologue)
650 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
652 if (TailCallReturnAddrDelta < 0)
653 X86FI->setCalleeSavedFrameSize(
654 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
656 bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
658 // The default stack probe size is 4096 if the function has no stackprobesize
660 unsigned StackProbeSize = 4096;
661 if (Fn->hasFnAttribute("stack-probe-size"))
662 Fn->getFnAttribute("stack-probe-size")
664 .getAsInteger(0, StackProbeSize);
666 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
667 // function, and use up to 128 bytes of stack space, don't have a frame
668 // pointer, calls, or dynamic alloca then we do not need to adjust the
669 // stack pointer (we fit in the Red Zone). We also check that we don't
670 // push and pop from the stack.
671 if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
672 !TRI->needsStackRealignment(MF) &&
673 !MFI->hasVarSizedObjects() && // No dynamic alloca.
674 !MFI->adjustsStack() && // No calls.
675 !IsWin64CC && // Win64 has no Red Zone
676 !usesTheStack(MF) && // Don't push and pop.
677 !MF.shouldSplitStack()) { // Regular stack
678 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
679 if (HasFP) MinSize += SlotSize;
680 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
681 MFI->setStackSize(StackSize);
684 // Insert stack pointer adjustment for later moving of return addr. Only
685 // applies to tail call optimized functions where the callee argument stack
686 // size is bigger than the callers.
687 if (TailCallReturnAddrDelta < 0) {
688 BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
689 /*InEpilogue=*/false)
690 .setMIFlag(MachineInstr::FrameSetup);
693 // Mapping for machine moves:
695 // DST: VirtualFP AND
696 // SRC: VirtualFP => DW_CFA_def_cfa_offset
697 // ELSE => DW_CFA_def_cfa
699 // SRC: VirtualFP AND
700 // DST: Register => DW_CFA_def_cfa_register
703 // OFFSET < 0 => DW_CFA_offset_extended_sf
704 // REG < 64 => DW_CFA_offset + Reg
705 // ELSE => DW_CFA_offset_extended
707 uint64_t NumBytes = 0;
708 int stackGrowth = -SlotSize;
710 // Find the funclet establisher parameter
711 unsigned Establisher = X86::NoRegister;
713 Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
715 Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
717 if (IsWin64Prologue && IsFunclet & !IsClrFunclet) {
718 // Immediately spill establisher into the home slot.
719 // The runtime cares about this.
720 // MOV64mr %rdx, 16(%rsp)
721 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
722 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
724 .setMIFlag(MachineInstr::FrameSetup);
725 MBB.addLiveIn(Establisher);
729 // Calculate required stack adjustment.
730 uint64_t FrameSize = StackSize - SlotSize;
731 // If required, include space for extra hidden slot for stashing base pointer.
732 if (X86FI->getRestoreBasePointer())
733 FrameSize += SlotSize;
735 NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
737 // Callee-saved registers are pushed on stack before the stack is realigned.
738 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
739 NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
741 // Get the offset of the stack slot for the EBP register, which is
742 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
743 // Update the frame offset adjustment.
745 MFI->setOffsetAdjustment(-NumBytes);
747 assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
748 "should calculate same local variable offset for funclets");
750 // Save EBP/RBP into the appropriate stack slot.
751 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
752 .addReg(MachineFramePtr, RegState::Kill)
753 .setMIFlag(MachineInstr::FrameSetup);
756 // Mark the place where EBP/RBP was saved.
757 // Define the current CFA rule to use the provided offset.
759 BuildCFI(MBB, MBBI, DL,
760 MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
762 // Change the rule for the FramePtr to be an "offset" rule.
763 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
764 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
765 nullptr, DwarfFramePtr, 2 * stackGrowth));
769 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
771 .setMIFlag(MachineInstr::FrameSetup);
774 if (!IsWin64Prologue && !IsFunclet) {
775 // Update EBP with the new base value.
776 BuildMI(MBB, MBBI, DL,
777 TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
780 .setMIFlag(MachineInstr::FrameSetup);
783 // Mark effective beginning of when frame pointer becomes valid.
784 // Define the current CFA to use the EBP/RBP register.
785 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
786 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
787 nullptr, DwarfFramePtr));
791 // Mark the FramePtr as live-in in every block. Don't do this again for
792 // funclet prologues.
794 for (MachineBasicBlock &EveryMBB : MF)
795 EveryMBB.addLiveIn(MachineFramePtr);
798 assert(!IsFunclet && "funclets without FPs not yet implemented");
799 NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
802 // For EH funclets, only allocate enough space for outgoing calls. Save the
803 // NumBytes value that we would've used for the parent frame.
804 unsigned ParentFrameNumBytes = NumBytes;
806 NumBytes = getWinEHFuncletFrameSize(MF);
808 // Skip the callee-saved push instructions.
809 bool PushedRegs = false;
810 int StackOffset = 2 * stackGrowth;
812 while (MBBI != MBB.end() &&
813 MBBI->getFlag(MachineInstr::FrameSetup) &&
814 (MBBI->getOpcode() == X86::PUSH32r ||
815 MBBI->getOpcode() == X86::PUSH64r)) {
817 unsigned Reg = MBBI->getOperand(0).getReg();
820 if (!HasFP && NeedsDwarfCFI) {
821 // Mark callee-saved push instruction.
822 // Define the current CFA rule to use the provided offset.
824 BuildCFI(MBB, MBBI, DL,
825 MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
826 StackOffset += stackGrowth;
830 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
831 MachineInstr::FrameSetup);
835 // Realign stack after we pushed callee-saved registers (so that we'll be
836 // able to calculate their offsets from the frame pointer).
837 // Don't do this for Win64, it needs to realign the stack after the prologue.
838 if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
839 assert(HasFP && "There should be a frame pointer if stack is realigned.");
840 BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
843 // If there is an SUB32ri of ESP immediately before this instruction, merge
844 // the two. This can be the case when tail call elimination is enabled and
845 // the callee has more arguments then the caller.
846 NumBytes -= mergeSPUpdates(MBB, MBBI, true);
848 // Adjust stack pointer: ESP -= numbytes.
850 // Windows and cygwin/mingw require a prologue helper routine when allocating
851 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
852 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
853 // stack and adjust the stack pointer in one go. The 64-bit version of
854 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
855 // responsible for adjusting the stack pointer. Touching the stack at 4K
856 // increments is necessary to ensure that the guard pages used by the OS
857 // virtual memory manager are allocated in correct sequence.
858 uint64_t AlignedNumBytes = NumBytes;
859 if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
860 AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
861 if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
862 // Check whether EAX is livein for this function.
863 bool isEAXAlive = isEAXLiveIn(MF);
866 // Sanity check that EAX is not livein for this function.
867 // It should not be, so throw an assert.
868 assert(!Is64Bit && "EAX is livein in x64 case!");
871 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
872 .addReg(X86::EAX, RegState::Kill)
873 .setMIFlag(MachineInstr::FrameSetup);
877 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
878 // Function prologue is responsible for adjusting the stack pointer.
879 if (isUInt<32>(NumBytes)) {
880 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
882 .setMIFlag(MachineInstr::FrameSetup);
883 } else if (isInt<32>(NumBytes)) {
884 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
886 .setMIFlag(MachineInstr::FrameSetup);
888 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
890 .setMIFlag(MachineInstr::FrameSetup);
893 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
894 // We'll also use 4 already allocated bytes for EAX.
895 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
896 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
897 .setMIFlag(MachineInstr::FrameSetup);
900 // Save a pointer to the MI where we set AX.
901 MachineBasicBlock::iterator SetRAX = MBBI;
904 // Call __chkstk, __chkstk_ms, or __alloca.
905 emitStackProbeCall(MF, MBB, MBBI, DL);
907 // Apply the frame setup flag to all inserted instrs.
908 for (; SetRAX != MBBI; ++SetRAX)
909 SetRAX->setFlag(MachineInstr::FrameSetup);
913 MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
915 StackPtr, false, NumBytes - 4);
916 MI->setFlag(MachineInstr::FrameSetup);
917 MBB.insert(MBBI, MI);
919 } else if (NumBytes) {
920 emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
923 if (NeedsWinCFI && NumBytes)
924 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
926 .setMIFlag(MachineInstr::FrameSetup);
928 int SEHFrameOffset = 0;
929 unsigned SPOrEstablisher = IsFunclet ? Establisher : StackPtr;
930 if (IsWin64Prologue && HasFP) {
931 // Set RBP to a small fixed offset from RSP. In the funclet case, we base
932 // this calculation on the incoming establisher, which holds the value of
933 // RSP from the parent frame at the end of the prologue.
934 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
936 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
937 SPOrEstablisher, false, SEHFrameOffset);
939 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
940 .addReg(SPOrEstablisher);
942 // If this is not a funclet, emit the CFI describing our frame pointer.
943 if (NeedsWinCFI && !IsFunclet)
944 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
946 .addImm(SEHFrameOffset)
947 .setMIFlag(MachineInstr::FrameSetup);
948 } else if (IsFunclet && STI.is32Bit()) {
949 // Reset EBP / ESI to something good for funclets.
950 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
953 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
954 const MachineInstr *FrameInstr = &*MBBI;
959 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
960 if (X86::FR64RegClass.contains(Reg)) {
961 unsigned IgnoredFrameReg;
962 int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
963 Offset += SEHFrameOffset;
965 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
968 .setMIFlag(MachineInstr::FrameSetup);
975 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
976 .setMIFlag(MachineInstr::FrameSetup);
978 // Realign stack after we spilled callee-saved registers (so that we'll be
979 // able to calculate their offsets from the frame pointer).
980 // Win64 requires aligning the stack after the prologue.
981 if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
982 assert(HasFP && "There should be a frame pointer if stack is realigned.");
983 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
986 // We already dealt with stack realignment and funclets above.
987 if (IsFunclet && STI.is32Bit())
990 // If we need a base pointer, set it up here. It's whatever the value
991 // of the stack pointer is at this point. Any variable size objects
992 // will be allocated after this, so we can still use the base pointer
993 // to reference locals.
994 if (TRI->hasBasePointer(MF)) {
995 // Update the base pointer with the current stack pointer.
996 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
997 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
998 .addReg(SPOrEstablisher)
999 .setMIFlag(MachineInstr::FrameSetup);
1000 if (X86FI->getRestoreBasePointer()) {
1001 // Stash value of base pointer. Saving RSP instead of EBP shortens
1002 // dependence chain. Used by SjLj EH.
1003 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1004 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1005 FramePtr, true, X86FI->getRestoreBasePointerOffset())
1006 .addReg(SPOrEstablisher)
1007 .setMIFlag(MachineInstr::FrameSetup);
1010 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1011 // Stash the value of the frame pointer relative to the base pointer for
1012 // Win32 EH. This supports Win32 EH, which does the inverse of the above:
1013 // it recovers the frame pointer from the base pointer rather than the
1014 // other way around.
1015 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1018 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
1019 assert(UsedReg == BasePtr);
1020 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1022 .setMIFlag(MachineInstr::FrameSetup);
1026 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1027 // Mark end of stack pointer adjustment.
1028 if (!HasFP && NumBytes) {
1029 // Define the current CFA rule to use the provided offset.
1031 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
1032 nullptr, -StackSize + stackGrowth));
1035 // Emit DWARF info specifying the offsets of the callee-saved registers.
1037 emitCalleeSavedFrameMoves(MBB, MBBI, DL);
1041 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1042 const MachineFunction &MF) const {
1043 // We can't use LEA instructions for adjusting the stack pointer if this is a
1044 // leaf function in the Win64 ABI. Only ADD instructions may be used to
1045 // deallocate the stack.
1046 // This means that we can use LEA for SP in two situations:
1047 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1048 // 2. We *have* a frame pointer which means we are permitted to use LEA.
1049 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
1052 static bool isFuncletReturnInstr(MachineInstr *MI) {
1053 switch (MI->getOpcode()) {
1055 case X86::CLEANUPRET:
1060 llvm_unreachable("impossible");
1063 unsigned X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1064 // This is the size of the pushed CSRs.
1066 MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
1067 // This is the amount of stack a funclet needs to allocate.
1068 unsigned MaxCallSize = MF.getFrameInfo()->getMaxCallFrameSize();
1069 // RBP is not included in the callee saved register block. After pushing RBP,
1070 // everything is 16 byte aligned. Everything we allocate before an outgoing
1071 // call must also be 16 byte aligned.
1072 unsigned FrameSizeMinusRBP =
1073 RoundUpToAlignment(CSSize + MaxCallSize, getStackAlignment());
1074 // Subtract out the size of the callee saved registers. This is how much stack
1075 // each funclet will allocate.
1076 return FrameSizeMinusRBP - CSSize;
1079 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1080 MachineBasicBlock &MBB) const {
1081 const MachineFrameInfo *MFI = MF.getFrameInfo();
1082 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1083 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1085 if (MBBI != MBB.end())
1086 DL = MBBI->getDebugLoc();
1087 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
1088 const bool Is64BitILP32 = STI.isTarget64BitILP32();
1089 unsigned FramePtr = TRI->getFrameRegister(MF);
1090 unsigned MachineFramePtr =
1091 Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
1094 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1096 IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
1097 bool IsFunclet = isFuncletReturnInstr(MBBI);
1098 MachineBasicBlock *TargetMBB = nullptr;
1100 // Get the number of bytes to allocate from the FrameInfo.
1101 uint64_t StackSize = MFI->getStackSize();
1102 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1103 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1104 uint64_t NumBytes = 0;
1106 if (MBBI->getOpcode() == X86::CATCHRET) {
1107 // SEH shouldn't use catchret.
1108 assert(!isAsynchronousEHPersonality(
1109 classifyEHPersonality(MF.getFunction()->getPersonalityFn())) &&
1110 "SEH should not use CATCHRET");
1112 NumBytes = getWinEHFuncletFrameSize(MF);
1113 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1114 TargetMBB = MBBI->getOperand(0).getMBB();
1117 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1119 .setMIFlag(MachineInstr::FrameDestroy);
1120 } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
1121 NumBytes = getWinEHFuncletFrameSize(MF);
1122 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1123 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1125 .setMIFlag(MachineInstr::FrameDestroy);
1126 } else if (hasFP(MF)) {
1127 // Calculate required stack adjustment.
1128 uint64_t FrameSize = StackSize - SlotSize;
1129 NumBytes = FrameSize - CSSize;
1131 // Callee-saved registers were pushed on stack before the stack was
1133 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1134 NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
1137 BuildMI(MBB, MBBI, DL,
1138 TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr)
1139 .setMIFlag(MachineInstr::FrameDestroy);
1141 NumBytes = StackSize - CSSize;
1143 uint64_t SEHStackAllocAmt = NumBytes;
1145 // Skip the callee-saved pop instructions.
1146 while (MBBI != MBB.begin()) {
1147 MachineBasicBlock::iterator PI = std::prev(MBBI);
1148 unsigned Opc = PI->getOpcode();
1150 if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1151 (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1152 Opc != X86::DBG_VALUE && !PI->isTerminator())
1157 MachineBasicBlock::iterator FirstCSPop = MBBI;
1160 // Fill EAX/RAX with the address of the target block.
1161 unsigned ReturnReg = STI.is64Bit() ? X86::RAX : X86::EAX;
1162 if (STI.is64Bit()) {
1163 // LEA64r TargetMBB(%rip), %rax
1164 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::LEA64r), ReturnReg)
1171 // MOV32ri $TargetMBB, %eax
1172 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::MOV32ri), ReturnReg)
1175 // Record that we've taken the address of TargetMBB and no longer just
1176 // reference it in a terminator.
1177 TargetMBB->setHasAddressTaken();
1180 if (MBBI != MBB.end())
1181 DL = MBBI->getDebugLoc();
1183 // If there is an ADD32ri or SUB32ri of ESP immediately before this
1184 // instruction, merge the two instructions.
1185 if (NumBytes || MFI->hasVarSizedObjects())
1186 NumBytes += mergeSPUpdates(MBB, MBBI, true);
1188 // If dynamic alloca is used, then reset esp to point to the last callee-saved
1189 // slot before popping them off! Same applies for the case, when stack was
1190 // realigned. Don't do this if this was a funclet epilogue, since the funclets
1191 // will not do realignment or dynamic stack allocation.
1192 if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
1194 if (TRI->needsStackRealignment(MF))
1196 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
1197 uint64_t LEAAmount =
1198 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
1200 // There are only two legal forms of epilogue:
1201 // - add SEHAllocationSize, %rsp
1202 // - lea SEHAllocationSize(%FramePtr), %rsp
1204 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
1205 // However, we may use this sequence if we have a frame pointer because the
1206 // effects of the prologue can safely be undone.
1207 if (LEAAmount != 0) {
1208 unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
1209 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
1210 FramePtr, false, LEAAmount);
1213 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
1214 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
1218 } else if (NumBytes) {
1219 // Adjust stack pointer back: ESP += numbytes.
1220 emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
1224 // Windows unwinder will not invoke function's exception handler if IP is
1225 // either in prologue or in epilogue. This behavior causes a problem when a
1226 // call immediately precedes an epilogue, because the return address points
1227 // into the epilogue. To cope with that, we insert an epilogue marker here,
1228 // then replace it with a 'nop' if it ends up immediately after a CALL in the
1229 // final emitted code.
1231 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
1233 // Add the return addr area delta back since we are not tail calling.
1234 int Offset = -1 * X86FI->getTCReturnAddrDelta();
1235 assert(Offset >= 0 && "TCDelta should never be positive");
1237 MBBI = MBB.getFirstTerminator();
1239 // Check for possible merge with preceding ADD instruction.
1240 Offset += mergeSPUpdates(MBB, MBBI, true);
1241 emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
1245 // NOTE: this only has a subset of the full frame index logic. In
1246 // particular, the FI < 0 and AfterFPPop logic is handled in
1247 // X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
1248 // (probably?) it should be moved into here.
1249 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1250 unsigned &FrameReg) const {
1251 const MachineFrameInfo *MFI = MF.getFrameInfo();
1253 // We can't calculate offset from frame pointer if the stack is realigned,
1254 // so enforce usage of stack/base pointer. The base pointer is used when we
1255 // have dynamic allocas in addition to dynamic realignment.
1256 if (TRI->hasBasePointer(MF))
1257 FrameReg = TRI->getBaseRegister();
1258 else if (TRI->needsStackRealignment(MF))
1259 FrameReg = TRI->getStackRegister();
1261 FrameReg = TRI->getFrameRegister(MF);
1263 // Offset will hold the offset from the stack pointer at function entry to the
1265 // We need to factor in additional offsets applied during the prologue to the
1266 // frame, base, and stack pointer depending on which is used.
1267 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1268 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1269 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1270 uint64_t StackSize = MFI->getStackSize();
1271 bool HasFP = hasFP(MF);
1272 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1273 int64_t FPDelta = 0;
1275 if (IsWin64Prologue) {
1276 assert(!MFI->hasCalls() || (StackSize % 16) == 8);
1278 // Calculate required stack adjustment.
1279 uint64_t FrameSize = StackSize - SlotSize;
1280 // If required, include space for extra hidden slot for stashing base pointer.
1281 if (X86FI->getRestoreBasePointer())
1282 FrameSize += SlotSize;
1283 uint64_t NumBytes = FrameSize - CSSize;
1285 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
1286 if (FI && FI == X86FI->getFAIndex())
1287 return -SEHFrameOffset;
1289 // FPDelta is the offset from the "traditional" FP location of the old base
1290 // pointer followed by return address and the location required by the
1291 // restricted Win64 prologue.
1292 // Add FPDelta to all offsets below that go through the frame pointer.
1293 FPDelta = FrameSize - SEHFrameOffset;
1294 assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
1295 "FPDelta isn't aligned per the Win64 ABI!");
1299 if (TRI->hasBasePointer(MF)) {
1300 assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
1302 // Skip the saved EBP.
1303 return Offset + SlotSize + FPDelta;
1305 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1306 return Offset + StackSize;
1308 } else if (TRI->needsStackRealignment(MF)) {
1310 // Skip the saved EBP.
1311 return Offset + SlotSize + FPDelta;
1313 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1314 return Offset + StackSize;
1316 // FIXME: Support tail calls
1319 return Offset + StackSize;
1321 // Skip the saved EBP.
1324 // Skip the RETADDR move area
1325 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1326 if (TailCallReturnAddrDelta < 0)
1327 Offset -= TailCallReturnAddrDelta;
1330 return Offset + FPDelta;
1333 // Simplified from getFrameIndexReference keeping only StackPointer cases
1334 int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
1336 unsigned &FrameReg) const {
1337 const MachineFrameInfo *MFI = MF.getFrameInfo();
1338 // Does not include any dynamic realign.
1339 const uint64_t StackSize = MFI->getStackSize();
1342 // LLVM arranges the stack as follows:
1347 // PUSH RBP <-- RBP points here
1349 // ~~~~~~~ <-- optional stack realignment dynamic adjustment
1352 // ... <-- RSP after prologue points here
1354 // if (hasVarSizedObjects()):
1355 // ... <-- "base pointer" (ESI/RBX) points here
1357 // ... <-- RSP points here
1359 // Case 1: In the simple case of no stack realignment and no dynamic
1360 // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
1361 // with fixed offsets from RSP.
1363 // Case 2: In the case of stack realignment with no dynamic allocas, fixed
1364 // stack objects are addressed with RBP and regular stack objects with RSP.
1366 // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
1367 // to address stack arguments for outgoing calls and nothing else. The "base
1368 // pointer" points to local variables, and RBP points to fixed objects.
1370 // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
1371 // answer we give is relative to the SP after the prologue, and not the
1372 // SP in the middle of the function.
1374 assert((!TRI->needsStackRealignment(MF) || !MFI->isFixedObjectIndex(FI)) &&
1375 "offset from fixed object to SP is not static");
1377 // We don't handle tail calls, and shouldn't be seeing them either.
1378 int TailCallReturnAddrDelta =
1379 MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
1380 assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
1384 // Fill in FrameReg output argument.
1385 FrameReg = TRI->getStackRegister();
1387 // This is how the math works out:
1389 // %rsp grows (i.e. gets lower) left to right. Each box below is
1390 // one word (eight bytes). Obj0 is the stack slot we're trying to
1393 // ----------------------------------
1394 // | BP | Obj0 | Obj1 | ... | ObjN |
1395 // ----------------------------------
1399 // A is the incoming stack pointer.
1400 // (B - A) is the local area offset (-8 for x86-64) [1]
1401 // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
1403 // |(E - B)| is the StackSize (absolute value, positive). For a
1404 // stack that grown down, this works out to be (B - E). [3]
1406 // E is also the value of %rsp after stack has been set up, and we
1407 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
1408 // (C - E) == (C - A) - (B - A) + (B - E)
1409 // { Using [1], [2] and [3] above }
1410 // == getObjectOffset - LocalAreaOffset + StackSize
1413 // Get the Offset from the StackPointer
1414 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1416 return Offset + StackSize;
1419 bool X86FrameLowering::assignCalleeSavedSpillSlots(
1420 MachineFunction &MF, const TargetRegisterInfo *TRI,
1421 std::vector<CalleeSavedInfo> &CSI) const {
1422 MachineFrameInfo *MFI = MF.getFrameInfo();
1423 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1425 unsigned CalleeSavedFrameSize = 0;
1426 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
1429 // emitPrologue always spills frame register the first thing.
1430 SpillSlotOffset -= SlotSize;
1431 MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1433 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
1434 // the frame register, we can delete it from CSI list and not have to worry
1435 // about avoiding it later.
1436 unsigned FPReg = TRI->getFrameRegister(MF);
1437 for (unsigned i = 0; i < CSI.size(); ++i) {
1438 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
1439 CSI.erase(CSI.begin() + i);
1445 // Assign slots for GPRs. It increases frame size.
1446 for (unsigned i = CSI.size(); i != 0; --i) {
1447 unsigned Reg = CSI[i - 1].getReg();
1449 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1452 SpillSlotOffset -= SlotSize;
1453 CalleeSavedFrameSize += SlotSize;
1455 int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1456 CSI[i - 1].setFrameIdx(SlotIndex);
1459 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
1461 // Assign slots for XMMs.
1462 for (unsigned i = CSI.size(); i != 0; --i) {
1463 unsigned Reg = CSI[i - 1].getReg();
1464 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1467 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1469 SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
1471 SpillSlotOffset -= RC->getSize();
1473 MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
1474 CSI[i - 1].setFrameIdx(SlotIndex);
1475 MFI->ensureMaxAlignment(RC->getAlignment());
1481 bool X86FrameLowering::spillCalleeSavedRegisters(
1482 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1483 const std::vector<CalleeSavedInfo> &CSI,
1484 const TargetRegisterInfo *TRI) const {
1485 DebugLoc DL = MBB.findDebugLoc(MI);
1487 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
1488 // for us, and there are no XMM CSRs on Win32.
1489 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
1492 // Push GPRs. It increases frame size.
1493 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1494 for (unsigned i = CSI.size(); i != 0; --i) {
1495 unsigned Reg = CSI[i - 1].getReg();
1497 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1499 // Add the callee-saved register as live-in. It's killed at the spill.
1502 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1503 .setMIFlag(MachineInstr::FrameSetup);
1506 // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
1507 // It can be done by spilling XMMs to stack frame.
1508 for (unsigned i = CSI.size(); i != 0; --i) {
1509 unsigned Reg = CSI[i-1].getReg();
1510 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1512 // Add the callee-saved register as live-in. It's killed at the spill.
1514 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1516 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
1519 MI->setFlag(MachineInstr::FrameSetup);
1526 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1527 MachineBasicBlock::iterator MI,
1528 const std::vector<CalleeSavedInfo> &CSI,
1529 const TargetRegisterInfo *TRI) const {
1533 if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
1534 // Don't restore CSRs in 32-bit EH funclets. Matches
1535 // spillCalleeSavedRegisters.
1538 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
1539 // funclets. emitEpilogue transforms these to normal jumps.
1540 if (MI->getOpcode() == X86::CATCHRET) {
1541 const Function *Func = MBB.getParent()->getFunction();
1542 bool IsSEH = isAsynchronousEHPersonality(
1543 classifyEHPersonality(Func->getPersonalityFn()));
1549 DebugLoc DL = MBB.findDebugLoc(MI);
1551 // Reload XMMs from stack frame.
1552 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1553 unsigned Reg = CSI[i].getReg();
1554 if (X86::GR64RegClass.contains(Reg) ||
1555 X86::GR32RegClass.contains(Reg))
1558 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1559 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
1563 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1564 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1565 unsigned Reg = CSI[i].getReg();
1566 if (!X86::GR64RegClass.contains(Reg) &&
1567 !X86::GR32RegClass.contains(Reg))
1570 BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
1571 .setMIFlag(MachineInstr::FrameDestroy);
1576 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
1577 BitVector &SavedRegs,
1578 RegScavenger *RS) const {
1579 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1581 MachineFrameInfo *MFI = MF.getFrameInfo();
1583 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1584 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1586 if (TailCallReturnAddrDelta < 0) {
1587 // create RETURNADDR area
1596 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1597 TailCallReturnAddrDelta - SlotSize, true);
1600 // Spill the BasePtr if it's used.
1601 if (TRI->hasBasePointer(MF)) {
1602 SavedRegs.set(TRI->getBaseRegister());
1604 // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
1605 if (MF.getMMI().hasEHFunclets()) {
1606 int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
1607 X86FI->setHasSEHFramePtrSave(true);
1608 X86FI->setSEHFramePtrSaveIndex(FI);
1614 HasNestArgument(const MachineFunction *MF) {
1615 const Function *F = MF->getFunction();
1616 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1618 if (I->hasNestAttr())
1624 /// GetScratchRegister - Get a temp register for performing work in the
1625 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
1626 /// and the properties of the function either one or two registers will be
1627 /// needed. Set primary to true for the first register, false for the second.
1629 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
1630 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1633 if (CallingConvention == CallingConv::HiPE) {
1635 return Primary ? X86::R14 : X86::R13;
1637 return Primary ? X86::EBX : X86::EDI;
1642 return Primary ? X86::R11 : X86::R12;
1644 return Primary ? X86::R11D : X86::R12D;
1647 bool IsNested = HasNestArgument(&MF);
1649 if (CallingConvention == CallingConv::X86_FastCall ||
1650 CallingConvention == CallingConv::Fast) {
1652 report_fatal_error("Segmented stacks does not support fastcall with "
1653 "nested function.");
1654 return Primary ? X86::EAX : X86::ECX;
1657 return Primary ? X86::EDX : X86::EAX;
1658 return Primary ? X86::ECX : X86::EAX;
1661 // The stack limit in the TCB is set to this many bytes above the actual stack
1663 static const uint64_t kSplitStackAvailable = 256;
1665 void X86FrameLowering::adjustForSegmentedStacks(
1666 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
1667 MachineFrameInfo *MFI = MF.getFrameInfo();
1669 unsigned TlsReg, TlsOffset;
1672 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1673 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1674 "Scratch register is live-in");
1676 if (MF.getFunction()->isVarArg())
1677 report_fatal_error("Segmented stacks do not support vararg functions.");
1678 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
1679 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
1680 !STI.isTargetDragonFly())
1681 report_fatal_error("Segmented stacks not supported on this platform.");
1683 // Eventually StackSize will be calculated by a link-time pass; which will
1684 // also decide whether checking code needs to be injected into this particular
1686 StackSize = MFI->getStackSize();
1688 // Do not generate a prologue for functions with a stack of size zero
1692 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
1693 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
1694 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1695 bool IsNested = false;
1697 // We need to know if the function has a nest argument only in 64 bit mode.
1699 IsNested = HasNestArgument(&MF);
1701 // The MOV R10, RAX needs to be in a different block, since the RET we emit in
1702 // allocMBB needs to be last (terminating) instruction.
1704 for (const auto &LI : PrologueMBB.liveins()) {
1705 allocMBB->addLiveIn(LI);
1706 checkMBB->addLiveIn(LI);
1710 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
1712 MF.push_front(allocMBB);
1713 MF.push_front(checkMBB);
1715 // When the frame size is less than 256 we just compare the stack
1716 // boundary directly to the value of the stack pointer, per gcc.
1717 bool CompareStackPointer = StackSize < kSplitStackAvailable;
1719 // Read the limit off the current stacklet off the stack_guard location.
1721 if (STI.isTargetLinux()) {
1723 TlsOffset = IsLP64 ? 0x70 : 0x40;
1724 } else if (STI.isTargetDarwin()) {
1726 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
1727 } else if (STI.isTargetWin64()) {
1729 TlsOffset = 0x28; // pvArbitrary, reserved for application use
1730 } else if (STI.isTargetFreeBSD()) {
1733 } else if (STI.isTargetDragonFly()) {
1735 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
1737 report_fatal_error("Segmented stacks not supported on this platform.");
1740 if (CompareStackPointer)
1741 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
1743 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
1744 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1746 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
1747 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1748 } else {
1749 if (STI.isTargetLinux()) {
1750 TlsReg = X86::GS;
1751 TlsOffset = 0x30;
1752 } else if (STI.isTargetDarwin()) {
1753 TlsReg = X86::GS;
1754 TlsOffset = 0x48 + 90*4;
1755 } else if (STI.isTargetWin32()) {
1756 TlsReg = X86::FS;
1757 TlsOffset = 0x14; // pvArbitrary, reserved for application use
1758 } else if (STI.isTargetDragonFly()) {
1759 TlsReg = X86::FS;
1760 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
1761 } else if (STI.isTargetFreeBSD()) {
1762 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
1763 } else {
1764 report_fatal_error("Segmented stacks not supported on this platform.");
1765 }
1767 if (CompareStackPointer)
1768 ScratchReg = X86::ESP;
1770 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
1771 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1773 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
1774 STI.isTargetDragonFly()) {
1775 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
1776 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1777 } else if (STI.isTargetDarwin()) {
1779 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
1780 unsigned ScratchReg2;
1781 bool SaveScratch2;
1782 if (CompareStackPointer) {
1783 // The primary scratch register is available for holding the TLS offset.
1784 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1785 SaveScratch2 = false;
1787 // Need to use a second register to hold the TLS offset
1788 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
1790 // Unfortunately, with fastcc the second scratch register may hold an
1791 // argument.
1792 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
1793 }
1795 // If Scratch2 is live-in then it needs to be saved.
1796 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
1797 "Scratch register is live-in and not saved");
1799 if (SaveScratch2)
1800 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
1801 .addReg(ScratchReg2, RegState::Kill);
1803 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
1804 .addImm(TlsOffset);
1805 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
1806 .addReg(ScratchReg)
1807 .addReg(ScratchReg2).addImm(1).addReg(0)
1808 .addImm(0)
1809 .addReg(TlsReg);
1811 if (SaveScratch2)
1812 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
1813 }
1814 }
1816 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
1817 // It jumps to normal execution of the function body.
1818 BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);
1820 // On 32 bit we first push the arguments size and then the frame size. On 64
1821 // bit, we pass the stack frame size in r10 and the argument size in r11.
1822 if (Is64Bit) {
1823 // Functions with nested arguments use R10, so it needs to be saved across
1824 // the call to _morestack.
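// Rough sketch of what allocMBB ends up containing in 64-bit mode
// (illustrative only; the exact operands are built below):
//
//   movq %r10, %rax        # only when a nest argument lives in R10
//   movq $StackSize, %r10
//   movq $ArgSize, %r11
//   callq __morestack
//   retq                   # MORESTACK_RET / MORESTACK_RET_RESTORE_R10 pseudo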
1826 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
1827 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
1828 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
1829 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
1830 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
1832 if (IsNested)
1833 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
1835 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
1836 .addImm(StackSize);
1837 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
1838 .addImm(X86FI->getArgumentStackSize());
1839 } else {
1840 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1841 .addImm(X86FI->getArgumentStackSize());
1842 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1843 .addImm(StackSize);
1844 }
1846 // __morestack is in libgcc
1847 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
1848 // Under the large code model, we cannot assume that __morestack lives
1849 // within 2^31 bytes of the call site, so we cannot use pc-relative
1850 // addressing. We cannot perform the call via a temporary register,
1851 // as the rax register may be used to store the static chain, and all
1852 // other suitable registers may be either callee-save or used for
1853 // parameter passing. We cannot use the stack at this point either
1854 // because __morestack manipulates the stack directly.
1856 // To avoid these issues, perform an indirect call via a read-only memory
1857 // location containing the address.
1859 // This solution is not perfect, as it assumes that the .rodata section
1860 // is laid out within 2^31 bytes of each function body, but this seems
1861 // to be sufficient for JIT.
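// Sketch of the resulting call (illustrative): the AsmPrinter emits a
// read-only __morestack_addr slot holding the address of __morestack, and
// the prologue does:
//
//   callq *__morestack_addr(%rip)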
1862 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
1863 .addReg(X86::RIP)
1864 .addImm(0)
1865 .addReg(0)
1866 .addExternalSymbol("__morestack_addr")
1867 .addReg(0);
1868 MF.getMMI().setUsesMorestackAddr(true);
1869 } else {
1870 if (Is64Bit)
1871 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
1872 .addExternalSymbol("__morestack");
1873 else
1874 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
1875 .addExternalSymbol("__morestack");
1876 }
1878 if (IsNested)
1879 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
1880 else
1881 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
1883 allocMBB->addSuccessor(&PrologueMBB);
1885 checkMBB->addSuccessor(allocMBB);
1886 checkMBB->addSuccessor(&PrologueMBB);
1893 /// Erlang programs may need a special prologue to handle the stack size they
1894 /// might need at runtime. That is because Erlang/OTP does not implement a C
1895 /// stack but uses a custom implementation of a hybrid stack/heap architecture.
1896 /// (for more information see Eric Stenman's Ph.D. thesis:
1897 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
1899 /// CheckStack:
1900 /// temp0 = sp - MaxStack
1901 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
1902 /// OldStart:
1903 /// ...
1904 /// IncStack:
1905 /// call inc_stack # doubles the stack space
1906 /// temp0 = sp - MaxStack
1907 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
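/// A rough x86-64 rendering of the check above (illustrative only, assuming
/// the process pointer P is kept in %rbp as the code below arranges):
///   leaq -MaxStack(%rsp), %r14
///   cmpq 0x90(%rbp), %r14       # P->sp_limit
///   jae  OldStart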
1908 void X86FrameLowering::adjustForHiPEPrologue(
1909 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
1910 MachineFrameInfo *MFI = MF.getFrameInfo();
1911 DebugLoc DL;
1912 // HiPE-specific values
1913 const unsigned HipeLeafWords = 24;
1914 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
1915 const unsigned Guaranteed = HipeLeafWords * SlotSize;
1916 unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
1917 MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
1918 unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
1920 assert(STI.isTargetLinux() &&
1921 "HiPE prologue is only supported on Linux operating systems.");
1923 // Compute the largest caller's frame that is needed to fit the callees'
1924 // frames. This 'MaxStack' is computed from:
1926 // a) the fixed frame size, which is the space needed for all spilled temps,
1927 // b) outgoing on-stack parameter areas, and
1928 // c) the minimum stack space this function needs to make available for the
1929 // functions it calls (a tunable ABI property).
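// For example (illustrative numbers): on x86-64 a callee declared with seven
// arguments passes six in registers, so CalleeStkArity is 1 and the loop
// below reserves an extra (24 - 1 - 1) * 8 = 176 bytes for calling it.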
1930 if (MFI->hasCalls()) {
1931 unsigned MoreStackForCalls = 0;
1933 for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
1934 MBBI != MBBE; ++MBBI)
1935 for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
1936 MI != ME; ++MI)
1937 if (MI->isCall()) {
1940 // Get callee operand.
1941 const MachineOperand &MO = MI->getOperand(0);
1943 // Only take account of global function calls (no closures etc.).
1944 if (!MO.isGlobal())
1945 continue;
1947 const Function *F = dyn_cast<Function>(MO.getGlobal());
1948 if (!F)
1949 continue;
1951 // Do not update 'MaxStack' for primitive and built-in functions
1952 // (encoded with names either starting with "erlang."/"bif_" or not
1953 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
1954 // "_", such as the BIF "suspend_0") as they are executed on another
1956 if (F->getName().find("erlang.") != StringRef::npos ||
1957 F->getName().find("bif_") != StringRef::npos ||
1958 F->getName().find_first_of("._") == StringRef::npos)
1961 unsigned CalleeStkArity =
1962 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
1963 if (HipeLeafWords - 1 > CalleeStkArity)
1964 MoreStackForCalls = std::max(MoreStackForCalls,
1965 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
1966 }
1967 MaxStack += MoreStackForCalls;
1968 }
1970 // If the stack frame needed is larger than the guaranteed size, runtime checks
1971 // and calls to the "inc_stack_0" BIF should be inserted in the assembly prologue.
1972 if (MaxStack > Guaranteed) {
1973 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
1974 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
1976 for (const auto &LI : PrologueMBB.liveins()) {
1977 stackCheckMBB->addLiveIn(LI);
1978 incStackMBB->addLiveIn(LI);
1979 }
1981 MF.push_front(incStackMBB);
1982 MF.push_front(stackCheckMBB);
1984 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
1985 unsigned LEAop, CMPop, CALLop;
1986 if (Is64Bit) {
1987 SPReg = X86::RSP;
1988 PReg = X86::RBP;
1989 LEAop = X86::LEA64r;
1990 CMPop = X86::CMP64rm;
1991 CALLop = X86::CALL64pcrel32;
1992 SPLimitOffset = 0x90;
1993 } else {
1994 SPReg = X86::ESP;
1995 PReg = X86::EBP;
1996 LEAop = X86::LEA32r;
1997 CMPop = X86::CMP32rm;
1998 CALLop = X86::CALLpcrel32;
1999 SPLimitOffset = 0x4c;
2000 }
2002 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2003 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2004 "HiPE prologue scratch register is live-in");
2006 // Create new MBB for StackCheck:
2007 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
2008 SPReg, false, -MaxStack);
2009 // SPLimitOffset is in a fixed heap location (pointed by BP).
2010 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
2011 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2012 BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);
2014 // Create new MBB for IncStack:
2015 BuildMI(incStackMBB, DL, TII.get(CALLop)).
2016 addExternalSymbol("inc_stack_0");
2017 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
2018 SPReg, false, -MaxStack);
2019 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
2020 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2021 BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
2023 stackCheckMBB->addSuccessor(&PrologueMBB, 99);
2024 stackCheckMBB->addSuccessor(incStackMBB, 1);
2025 incStackMBB->addSuccessor(&PrologueMBB, 99);
2026 incStackMBB->addSuccessor(incStackMBB, 1);
2027 }
2033 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
2034 MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
2039 if (Offset % SlotSize)
2040 return false;
2042 int NumPops = Offset / SlotSize;
2043 // This is only worth it if we have at most 2 pops.
2044 if (NumPops != 1 && NumPops != 2)
2045 return false;
2047 // Handle only the trivial case where the adjustment directly follows
2048 // a call. This is the most common one, anyway.
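// For example (illustrative, 32-bit):
//   call foo
//   addl $8, %esp
// can, when both %ecx and %edx are known dead after the call, be emitted as
//   call foo
//   popl %ecx
//   popl %edx
// which is smaller when optimizing for size.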
2049 if (MBBI == MBB.begin())
2050 return false;
2051 MachineBasicBlock::iterator Prev = std::prev(MBBI);
2052 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
2053 return false;
2055 unsigned Regs[2];
2056 unsigned FoundRegs = 0;
2058 auto RegMask = Prev->getOperand(1);
2060 auto &RegClass =
2061 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
2062 // Try to find up to NumPops free registers.
2063 for (auto Candidate : RegClass) {
2065 // Poor man's liveness:
2066 // Since we're immediately after a call, any register that is clobbered
2067 // by the call and not defined by it can be considered dead.
2068 if (!RegMask.clobbersPhysReg(Candidate))
2069 continue;
2071 bool IsDef = false;
2072 for (const MachineOperand &MO : Prev->implicit_operands()) {
2073 if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
2074 IsDef = true;
2075 break;
2076 }
2077 }
2079 if (IsDef)
2080 continue;
2082 Regs[FoundRegs++] = Candidate;
2083 if (FoundRegs == (unsigned)NumPops)
2084 break;
2085 }
2087 if (FoundRegs == 0)
2088 return false;
2090 // If we found only one free register, but need two, reuse the same one twice.
2091 while (FoundRegs < (unsigned)NumPops)
2092 Regs[FoundRegs++] = Regs[0];
2094 for (int i = 0; i < NumPops; ++i)
2095 BuildMI(MBB, MBBI, DL,
2096 TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
2098 return true;
2099 }
2101 void X86FrameLowering::
2102 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2103 MachineBasicBlock::iterator I) const {
2104 bool reserveCallFrame = hasReservedCallFrame(MF);
2105 unsigned Opcode = I->getOpcode();
2106 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
2107 DebugLoc DL = I->getDebugLoc();
2108 uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
2109 uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
2112 if (!reserveCallFrame) {
2113 // If the stack pointer can be changed after prologue, turn the
2114 // adjcallstackup instruction into a 'sub ESP, <amt>' and the
2115 // adjcallstackdown instruction into 'add ESP, <amt>'
2117 // We need to keep the stack aligned properly. To do this, we round the
2118 // amount of space needed for the outgoing arguments up to the next
2119 // alignment boundary.
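// For example, a 20-byte outgoing-argument area with a 16-byte stack
// alignment is rounded up to 32 bytes here.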
2120 unsigned StackAlign = getStackAlignment();
2121 Amount = RoundUpToAlignment(Amount, StackAlign);
2123 MachineModuleInfo &MMI = MF.getMMI();
2124 const Function *Fn = MF.getFunction();
2125 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2126 bool DwarfCFI = !WindowsCFI &&
2127 (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
2129 // If we have any exception handlers in this function, and we adjust
2130 // the SP before calls, we may need to indicate this to the unwinder
2131 // using GNU_ARGS_SIZE. Note that this may be necessary even when
2132 // Amount == 0, because the preceding function may have set a non-0
2133 // GNU_ARGS_SIZE.
2134 // TODO: We don't need to reset this between subsequent functions,
2135 // if it didn't change.
2136 bool HasDwarfEHHandlers = !WindowsCFI &&
2137 !MF.getMMI().getLandingPads().empty();
2139 if (HasDwarfEHHandlers && !isDestroy &&
2140 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
2141 BuildCFI(MBB, I, DL,
2142 MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
2144 if (Amount == 0)
2145 return;
2147 // Factor out the amount that gets handled inside the sequence
2148 // (Pushes of argument for frame setup, callee pops for frame destroy)
2149 Amount -= InternalAmt;
2151 // If this is a callee-pop calling convention, and we're emitting precise
2152 // SP-based CFI, emit a CFA adjust for the amount the callee popped.
2153 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF) &&
2154 MMI.usePreciseUnwindInfo())
2155 BuildCFI(MBB, I, DL,
2156 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
2159 // Add Amount to SP to destroy a frame, and subtract to setup.
2160 int Offset = isDestroy ? Amount : -Amount;
2162 if (!(Fn->optForMinSize() &&
2163 adjustStackWithPops(MBB, I, DL, Offset)))
2164 BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
2167 if (DwarfCFI && !hasFP(MF)) {
2168 // If we don't have FP, but need to generate unwind information,
2169 // we need to set the correct CFA offset after the stack adjustment.
2170 // How much we adjust the CFA offset depends on whether we're emitting
2171 // CFI only for EH purposes or for debugging. EH only requires the CFA
2172 // offset to be correct at each call site, while for debugging we want
2173 // it to be more precise.
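// For example (illustrative): a 16-byte adjustment at frame setup emits
// .cfi_adjust_cfa_offset 16, and the matching frame destroy emits
// .cfi_adjust_cfa_offset -16.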
2174 int CFAOffset = Amount;
2175 if (!MMI.usePreciseUnwindInfo())
2176 CFAOffset += InternalAmt;
2177 CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
2178 BuildCFI(MBB, I, DL,
2179 MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
2180 }
2182 return;
2183 }
2185 if (isDestroy && InternalAmt) {
2186 // If we are performing frame pointer elimination and if the callee pops
2187 // something off the stack pointer, add it back. We do this until we have
2188 // more advanced stack pointer tracking ability.
2189 // We are not tracking the stack pointer adjustment by the callee, so make
2190 // sure we restore the stack pointer immediately after the call, there may
2191 // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
2192 MachineBasicBlock::iterator B = MBB.begin();
2193 while (I != B && !std::prev(I)->isCall())
2194 --I;
2195 BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
2196 }
2197 }
2199 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2200 assert(MBB.getParent() && "Block is not attached to a function!");
2202 // Win64 has strict requirements in terms of epilogue and we are
2203 // not taking a chance at messing with them.
2204 // I.e., unless this block is already an exit block, we can't use
2205 // it as an epilogue.
2206 if (MBB.getParent()->getSubtarget<X86Subtarget>().isTargetWin64() &&
2207 !MBB.succ_empty() && !MBB.isReturnBlock())
2208 return false;
2210 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
2211 return true;
2213 // If we cannot use LEA to adjust SP, we may need to use ADD, which
2214 // clobbers the EFLAGS. Check that none of the terminators reads the
2215 // EFLAGS, and if one uses it, conservatively assume this is not
2216 // safe to insert the epilogue here.
2217 return !terminatorsNeedFlagsAsInput(MBB);
2220 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
2221 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
2222 DebugLoc DL, bool RestoreSP) const {
2223 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
2224 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
2225 assert(STI.is32Bit() && !Uses64BitFramePtr &&
2226 "restoring EBP/ESI on non-32-bit target");
2228 MachineFunction &MF = *MBB.getParent();
2229 unsigned FramePtr = TRI->getFrameRegister(MF);
2230 unsigned BasePtr = TRI->getBaseRegister();
2231 MachineModuleInfo &MMI = MF.getMMI();
2232 const Function *Fn = MF.getFunction();
2233 WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn);
2234 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2235 MachineFrameInfo *MFI = MF.getFrameInfo();
2237 // FIXME: Don't set FrameSetup flag in catchret case.
2239 int FI = FuncInfo.EHRegNodeFrameIndex;
2240 int EHRegSize = MFI->getObjectSize(FI);
2242 if (RestoreSP) {
2243 // MOV32rm -EHRegSize(%ebp), %esp
2244 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
2245 X86::EBP, true, -EHRegSize)
2246 .setMIFlag(MachineInstr::FrameSetup);
2247 }
2249 unsigned UsedReg;
2250 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
2251 int EndOffset = -EHRegOffset - EHRegSize;
2252 FuncInfo.EHRegNodeEndOffset = EndOffset;
2254 if (UsedReg == FramePtr) {
2255 // ADD $offset, %ebp
2256 unsigned ADDri = getADDriOpcode(false, EndOffset);
2257 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
2258 .addReg(FramePtr)
2259 .addImm(EndOffset)
2260 .setMIFlag(MachineInstr::FrameSetup)
2261 ->getOperand(3)
2262 .setIsDead();
2263 assert(EndOffset >= 0 &&
2264 "end of registration object above normal EBP position!");
2265 } else if (UsedReg == BasePtr) {
2266 // LEA offset(%ebp), %esi
2267 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
2268 FramePtr, false, EndOffset)
2269 .setMIFlag(MachineInstr::FrameSetup);
2270 // MOV32rm SavedEBPOffset(%esi), %ebp
2271 assert(X86FI->getHasSEHFramePtrSave());
2272 int Offset =
2273 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
2274 assert(UsedReg == BasePtr);
2275 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
2276 UsedReg, true, Offset)
2277 .setMIFlag(MachineInstr::FrameSetup);
2278 } else {
2279 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
2280 }
2282 return MBBI;
2283 }
2284 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
2285 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
2286 unsigned Offset = 16;
2287 // RBP is immediately pushed.
2288 Offset += SlotSize;
2289 // All callee-saved registers are then pushed.
2290 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
2291 // Every funclet allocates enough stack space for the largest outgoing call.
2292 Offset += getWinEHFuncletFrameSize(MF);
2293 return Offset;
2294 }