//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include <cstdlib>

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   unsigned StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having an FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo()->hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// usesTheStack - This function checks if any of the users of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
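
  // A COPY of EFLAGS is lowered by X86InstrInfo::copyPhysReg through a
  // push-flags/pop sequence, so the mere presence of such a copy implies
  // stack traffic.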
  return any_of(MRI.reg_instructions(X86::EFLAGS),
                [](const MachineInstr &RI) { return RI.isCopy(); });
}

static bool doesStackUseImplyFP(const MachineFunction &MF) {
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  return IsWin64Prologue && usesTheStack(MF);
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
          MFI->hasStackMap() || MFI->hasPatchPoint() ||
          doesStackUseImplyFP(MF));
}
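
// The following helpers pick an opcode for stack-pointer arithmetic based on
// whether the immediate fits in a sign-extended 8-bit field: for example, a
// 16-byte adjustment selects SUB64ri8, while 4096 requires SUB64ri32.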
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::AND32ri8;
    return X86::AND32ri;
  }
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worry about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const X86RegisterInfo *TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP)
        return CS;
  }
  }

  return 0;
}

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if the eflags are live-in to the region formed by
/// the terminators, or live-out of that region without being defined by one
/// of the terminators.
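/// For example, a conditional branch terminator reads EFLAGS without defining
/// it, so a flag-clobbering ADD or SUB cannot be inserted in front of it.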
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an eflags that is not defined
      // by another previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve it.
      // However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out, that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
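/// For example, on x86-64 a request of -8 becomes a single PUSH (the pushed
/// value is irrelevant; the push merely allocates the slot), while -40
/// becomes "sub $40, %rsp" via BuildStackAdjustment.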
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    int64_t NumBytes, bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    if (Offset > Chunk) {
      // Rather than emit a long series of instructions for large offsets,
      // load the offset into a register and do one sub/add
      unsigned Reg = 0;

      if (isSub && !isEAXLiveIn(*MBB.getParent()))
        Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
      else
        Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);

      if (Reg) {
        unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
        BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
            .addImm(Offset);
        Opc = isSub
                  ? getSUBrrOpcode(Is64Bit)
                  : getADDrrOpcode(Is64Bit);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                               .addReg(StackPtr)
                               .addReg(Reg);
        MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
        Offset = 0;
        continue;
      }
    }

    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
                         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                         : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        unsigned Opc = isSub
                           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                           : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        else
          MI->setFlag(MachineInstr::FrameDestroy);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstrBuilder MI = BuildStackAdjustment(
        MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
    if (isSub)
      MI.setMIFlag(MachineInstr::FrameSetup);
    else
      MI.setMIFlag(MachineInstr::FrameDestroy);

    Offset -= ThisVal;
  }
}

MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
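  // Unlike ADD/SUB, LEA does not write EFLAGS, which is also what makes it
  // the safe choice whenever the flags are live across the adjustment.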
343 // Check if inserting the prologue at the beginning
344 // of MBB would require to use LEA operations.
345 // We need to use LEA operations if EFLAGS is live in, because
346 // it means an instruction will read it before it gets defined.
347 UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
349 // If we can use LEA for SP but we shouldn't, check that none
350 // of the terminators uses the eflags. Otherwise we will insert
351 // a ADD that will redefine the eflags and break the condition.
352 // Alternatively, we could move the ADD, but this may not be possible
353 // and is an optimization anyway.
354 UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
355 if (UseLEA && !STI.useLeaForSP())
356 UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
357 // If that assert breaks, that means we do not do the right thing
358 // in canUseAsEpilogue.
359 assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
360 "We shouldn't have allowed this insertion point");
363 MachineInstrBuilder MI;
365 MI = addRegOffset(BuildMI(MBB, MBBI, DL,
366 TII.get(getLEArOpcode(Uses64BitFramePtr)),
368 StackPtr, false, Offset);
370 bool IsSub = Offset < 0;
371 uint64_t AbsOffset = IsSub ? -Offset : Offset;
372 unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
373 : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
374 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
377 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;
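
  // For example, with doMergeWithPrevious == true and a preceding
  // "subq $16, %rsp", Offset becomes -16 and the SUB is erased; the caller
  // can then fold those 16 bytes into its own adjustment.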
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI, DebugLoc DL,
                                MCCFIInstruction CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            DebugLoc DL) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    BuildCFI(MBB, MBBI, DL,
             MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
  }
}

MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               DebugLoc DL,
                                               bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      return emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
    } else {
      return emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    return emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  const StringRef ChkStkStubSymbol = "__chkstk_stub";
  MachineInstr *ChkStkStub = nullptr;

  for (MachineInstr &MI : PrologMBB) {
    if (MI.isCall() && MI.getOperand(0).isSymbol() &&
        ChkStkStubSymbol == MI.getOperand(0).getSymbolName()) {
      ChkStkStub = &MI;
      break;
    }
  }

  if (ChkStkStub != nullptr) {
    MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
    assert(std::prev(MBBI).operator==(ChkStkStub) &&
           "MBBI expected after __chkstk_stub.");
    DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
    emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
    ChkStkStub->eraseFromParent();
  }
}

MachineInstr *X86FrameLowering::emitStackProbeInline(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);
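  // With 4 KiB pages, PageMask is 0xFFFFFFFFFFFFF000, and gs:[0x10] is the
  // StackLimit field of the Win64 Thread Environment Block.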

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
                                    : MRI.createVirtualRegister(RegClass),
                 ZeroReg = InProlog ? (unsigned)X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 CopyReg = InProlog ? (unsigned)X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 TestReg = InProlog ? (unsigned)X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 FinalReg = InProlog ? (unsigned)X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
                 RoundedReg = InProlog ? (unsigned)X86::RDX
                                       : MRI.createVirtualRegister(RegClass),
                 LimitReg = InProlog ? (unsigned)X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
                 JoinReg = InProlog ? (unsigned)X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 ProbeReg = InProlog ? (unsigned)X86::RCX
                                     : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  // Future optimization: don't save or restore if not live in.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);
    RCXShadowSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    RDXShadowSlot = RCXShadowSlot + 8;
    // Emit the saves.
    addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                 RCXShadowSlot)
        .addReg(X86::RCX);
    addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                 RDXShadowSlot)
        .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOVB64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JAE_1)).addMBB(ContinueMBB);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit, RoundedReg page-rounded
  // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
  // and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JNE_1)).addMBB(LoopMBB);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
                         X86::RCX),
                 X86::RSP, false, RCXShadowSlot);
    addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
                         X86::RDX),
                 X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }

  // Possible TODO: physreg liveness for InProlog case.

  return &*ContinueMBBI;
}

MachineInstr *X86FrameLowering::emitStackProbeCall(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  const char *Symbol;
  if (Is64Bit) {
    if (STI.isTargetCygMing()) {
      Symbol = "___chkstk_ms";
    } else {
      Symbol = "__chkstk";
    }
  } else if (STI.isTargetCygMing())
    Symbol = "_alloca";
  else
    Symbol = "_chkstk";

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(Symbol);
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
  }

  unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
  unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (Is64Bit) {
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
        .addReg(X86::RSP)
        .addReg(X86::RAX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }

  return &*MBBI;
}

MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {

  assert(InProlog && "ChkStkStub called outside prolog!");

  BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__chkstk_stub");

  return nullptr;
}

static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
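  // For example, SPAdjust == 72 yields 72 & -16 == 64, while any SPAdjust of
  // 128 or more is first clamped to 128.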
  return SEHFrameOffset & -16;
}

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  unsigned StackAlign = getStackAlignment();
  if (MF.getFunction()->hasFnAttribute("stackrealign")) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }
  return MaxAlign;
}

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          DebugLoc DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
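  // For example, MaxAlign == 32 gives Val == -32 and emits "and $-32, %rsp"
  // (AND64ri8, since -32 fits in a sign-extended 8-bit immediate).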
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                         .addReg(Reg)
                         .addImm(Val)
                         .setMIFlag(MachineInstr::FrameSetup);

  // The EFLAGS implicit def is dead.
  MI->getOperand(3).setIsDead();
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
/// generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push  %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
         .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
              ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();       // Number of bytes to allocate.
  bool IsFunclet = MBB.isEHFuncletEntry();
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn->hasPersonalityFn())
    Personality = classifyEHPersonality(Fn->getPersonalityFn());
  bool FnHasClrFunclet =
      MMI.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
  bool NeedsDwarfCFI =
      !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
  unsigned FramePtr = TRI->getFrameRegister(MF);
  const unsigned MachineFramePtr =
      STI.isTarget64BitILP32()
          ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
  unsigned BasePtr = TRI->getBaseRegister();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());

  // The default stack probe size is 4096 if the function has no stackprobesize
  // attribute.
  unsigned StackProbeSize = 4096;
  if (Fn->hasFnAttribute("stack-probe-size"))
    Fn->getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
      !TRI->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() && // No dynamic alloca.
      !MFI->adjustsStack() &&       // No calls.
      !IsWin64CC &&                 // Win64 has no Red Zone
      !usesTheStack(MF) &&          // Don't push and pop.
      !MF.shouldSplitStack()) {     // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
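    // For example, a leaf function with StackSize == 160 and no pushed CSRs
    // is shrunk to a 32-byte adjustment here; the remaining 128 bytes live in
    // the red zone below RSP.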
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the callers.
  if (TailCallReturnAddrDelta < 0) {
    BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter
  unsigned Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Callee-saved registers are pushed on stack before the stack is realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    if (!IsFunclet)
      MFI->setOffsetAdjustment(-NumBytes);
    else
      assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
             "should calculate same local variable offset for funclets");

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
                                  nullptr, DwarfFramePtr, 2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsWin64Prologue && !IsFunclet) {
      // Update EBP with the new base value.
      BuildMI(MBB, MBBI, DL,
              TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
              FramePtr)
          .addReg(StackPtr)
          .setMIFlag(MachineInstr::FrameSetup);

      if (NeedsDwarfCFI) {
        // Mark effective beginning of when frame pointer becomes valid.
        // Define the current CFA to use the EBP/RBP register.
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
        BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
                                    nullptr, DwarfFramePtr));
      }
    }

    // Mark the FramePtr as live-in in every block. Don't do this again for
    // funclet prologues.
    if (!IsFunclet) {
      for (MachineBasicBlock &EveryMBB : MF)
        EveryMBB.addLiveIn(MachineFramePtr);
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    unsigned Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      StackOffset += stackGrowth;
    }

    if (NeedsWinCFI) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
          MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Don't do this for Win64, it needs to realign the stack after the prologue.
  if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
  }

  // If there is an SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, true);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
    AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not livein for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
          .addReg(X86::EAX, RegState::Kill)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      if (isUInt<32>(NumBytes)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      } else if (isInt<32>(NumBytes)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
          .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Call __chkstk, __chkstk_ms, or __alloca.
    emitStackProbe(MF, MBB, MBBI, DL, true);

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI =
          addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
                       StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
  }

  if (NeedsWinCFI && NumBytes)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);

  int SEHFrameOffset = 0;
  unsigned SPOrEstablisher;
  if (IsFunclet) {
    if (IsClrFunclet) {
      // The establisher parameter passed to a CLR funclet is actually a pointer
      // to the (mostly empty) frame of its nearest enclosing funclet; we have
      // to find the root function establisher frame by loading the PSPSym from
      // the intermediate frame.
      unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
      MachinePointerInfo NoInfo;
      MBB.addLiveIn(Establisher);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
                   Establisher, false, PSPSlotOffset)
          .addMemOperand(MF.getMachineMemOperand(
              NoInfo, MachineMemOperand::MOLoad, SlotSize, SlotSize));

      // Save the root establisher back into the current funclet's (mostly
      // empty) frame, in case a sub-funclet or the GC needs it.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
                   false, PSPSlotOffset)
          .addReg(Establisher)
          .addMemOperand(
              MF.getMachineMemOperand(NoInfo, MachineMemOperand::MOStore |
                                                  MachineMemOperand::MOVolatile,
                                      SlotSize, SlotSize));
    }
    SPOrEstablisher = Establisher;
  } else {
    SPOrEstablisher = StackPtr;
  }

  if (IsWin64Prologue && HasFP) {
    // Set RBP to a small fixed offset from RSP. In the funclet case, we base
    // this calculation on the incoming establisher, which holds the value of
    // RSP from the parent frame at the end of the prologue.
    SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
    if (SEHFrameOffset)
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
                   SPOrEstablisher, false, SEHFrameOffset);
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
          .addReg(SPOrEstablisher);

    // If this is not a funclet, emit the CFI describing our frame pointer.
    if (NeedsWinCFI && !IsFunclet) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
      if (isAsynchronousEHPersonality(Personality))
        MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
    }
  } else if (IsFunclet && STI.is32Bit()) {
    // Reset EBP / ESI to something good for funclets.
    MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
    // If we're a catch funclet, we can be returned to via catchret. Save ESP
    // into the registration node so that the runtime will restore it for us.
    if (!MBB.isCleanupFuncletEntry()) {
      assert(Personality == EHPersonality::MSVC_CXX);
      unsigned FrameReg;
      int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
      int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg);
      // ESP is the first field, so no extra displacement is needed.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
                   false, EHRegOffset)
          .addReg(X86::ESP);
    }
  }

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
    const MachineInstr *FrameInstr = &*MBBI;
    ++MBBI;

    if (NeedsWinCFI) {
      int FI;
      if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
        if (X86::FR64RegClass.contains(Reg)) {
          unsigned IgnoredFrameReg;
          int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
          Offset += SEHFrameOffset;

          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
              .addImm(Reg)
              .addImm(Offset)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  }

  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);

  if (FnHasClrFunclet && !IsFunclet) {
    // Save the so-called Initial-SP (i.e. the value of the stack pointer
    // immediately after the prolog) into the PSPSlot so that funclets
    // and the GC can recover it.
    unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
    auto PSPInfo = MachinePointerInfo::getFixedStack(
        MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
                 PSPSlotOffset)
        .addReg(StackPtr)
        .addMemOperand(MF.getMachineMemOperand(
            PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            SlotSize, SlotSize));
  }

  // Realign stack after we spilled callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Win64 requires aligning the stack after the prologue.
  if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
  }

  // We already dealt with stack realignment and funclets above.
  if (IsFunclet && STI.is32Bit())
    return;

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (TRI->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
        .addReg(SPOrEstablisher)
        .setMIFlag(MachineInstr::FrameSetup);
    if (X86FI->getRestoreBasePointer()) {
      // Stash value of base pointer. Saving RSP instead of EBP shortens
      // dependence chain. Used by SjLj EH.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
                   FramePtr, true, X86FI->getRestoreBasePointerOffset())
          .addReg(SPOrEstablisher)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
      // Stash the value of the frame pointer relative to the base pointer for
      // Win32 EH. This supports Win32 EH, which does the inverse of the above:
      // it recovers the frame pointer from the base pointer rather than the
      // other way around.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      unsigned UsedReg;
      int Offset =
          getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
      assert(UsedReg == BasePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
          .addReg(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
                                  nullptr, -StackSize + stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
  }
}

bool X86FrameLowering::canUseLEAForSPInEpilogue(
    const MachineFunction &MF) const {
  // We can't use LEA instructions for adjusting the stack pointer if this is a
  // leaf function in the Win64 ABI. Only ADD instructions may be used to
  // deallocate the stack.
  // This means that we can use LEA for SP in two situations:
  // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
  // 2. We *have* a frame pointer which means we are permitted to use LEA.
  return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}

static bool isFuncletReturnInstr(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

// CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
// stack. It holds a pointer to the bottom of the root function frame. The
// establisher frame pointer passed to a nested funclet may point to the
// (mostly empty) frame of its parent funclet, but it will need to find
// the frame of the root function to access locals. To facilitate this,
// every funclet copies the pointer to the bottom of the root function
// frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
// same offset for the PSPSym in the root function frame that's used in the
// funclets' frames allows each funclet to dynamically accept any ancestor
// frame as its establisher argument (the runtime doesn't guarantee the
// immediate parent for some reason lost to history), and also allows the GC,
// which uses the PSPSym for some bookkeeping, to find it in any funclet's
// frame with only a single offset reported for the entire method.
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
  const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
  // getFrameIndexReferenceFromSP has an out ref parameter for the stack
  // pointer register; pass a dummy that we ignore
  unsigned SPReg;
  int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
  assert(Offset >= 0);
  return static_cast<unsigned>(Offset);
}

unsigned
X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
  // This is the size of the pushed CSRs.
  unsigned CSSize =
      MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // This is the amount of stack a funclet needs to allocate.
  unsigned UsedSize;
  EHPersonality Personality =
      classifyEHPersonality(MF.getFunction()->getPersonalityFn());
  if (Personality == EHPersonality::CoreCLR) {
    // CLR funclets need to hold enough space to include the PSPSym, at the
    // same offset from the stack pointer (immediately after the prolog) as it
    // resides at in the main function.
    UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
  } else {
    // Other funclets just need enough stack for outgoing call arguments.
    UsedSize = MF.getFrameInfo()->getMaxCallFrameSize();
  }
  // RBP is not included in the callee saved register block. After pushing RBP,
  // everything is 16 byte aligned. Everything we allocate before an outgoing
  // call must also be 16 byte aligned.
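  // For example, 8 bytes of pushed CSRs plus a 32-byte max call frame gives
  // RoundUpToAlignment(40, 16) == 48 below, so each funclet allocates
  // 48 - 8 = 40 bytes.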
  unsigned FrameSizeMinusRBP =
      RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
  // Subtract out the size of the callee saved registers. This is how much stack
  // each funclet will allocate.
  return FrameSizeMinusRBP - CSSize;
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  unsigned FramePtr = TRI->getFrameRegister(MF);
  unsigned MachineFramePtr =
      Is64BitILP32 ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinCFI =
      IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
  bool IsFunclet = isFuncletReturnInstr(MBBI);
  MachineBasicBlock *TargetMBB = nullptr;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (MBBI->getOpcode() == X86::CATCHRET) {
    // SEH shouldn't use catchret.
    assert(!isAsynchronousEHPersonality(
               classifyEHPersonality(MF.getFunction()->getPersonalityFn())) &&
           "SEH should not use CATCHRET");

    NumBytes = getWinEHFuncletFrameSize(MF);
    assert(hasFP(MF) && "EH funclets without FP not yet implemented");
    TargetMBB = MBBI->getOperand(0).getMBB();

    // Pop EBP.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
            MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);
  } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
    NumBytes = getWinEHFuncletFrameSize(MF);
    assert(hasFP(MF) && "EH funclets without FP not yet implemented");
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
            MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);
  } else if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize;

    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);
  } else {
    NumBytes = StackSize - CSSize;
  }
  uint64_t SEHStackAllocAmt = NumBytes;

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
        (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
        Opc != X86::DBG_VALUE && !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  if (TargetMBB) {
    // Fill EAX/RAX with the address of the target block.
    unsigned ReturnReg = STI.is64Bit() ? X86::RAX : X86::EAX;
    if (STI.is64Bit()) {
      // LEA64r TargetMBB(%rip), %rax
      BuildMI(MBB, FirstCSPop, DL, TII.get(X86::LEA64r), ReturnReg)
          .addReg(X86::RIP)
          .addImm(0)
          .addReg(0)
          .addMBB(TargetMBB)
          .addReg(0);
    } else {
      // MOV32ri $TargetMBB, %eax
      BuildMI(MBB, FirstCSPop, DL, TII.get(X86::MOV32ri), ReturnReg)
          .addMBB(TargetMBB);
    }
    // Record that we've taken the address of TargetMBB and no longer just
    // reference it in a terminator.
    TargetMBB->setHasAddressTaken();
  }

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    NumBytes += mergeSPUpdates(MBB, MBBI, true);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! Same applies for the case, when stack was
  // realigned. Don't do this if this was a funclet epilogue, since the funclets
  // will not do realignment or dynamic stack allocation.
  if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
      !IsFunclet) {
    if (TRI->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, LEAAmount);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
          .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
    --MBBI;
  }

  // Windows unwinder will not invoke function's exception handler if IP is
  // either in prologue or in epilogue. This behavior causes a problem when a
  // call immediately precedes an epilogue, because the return address points
  // into the epilogue. To cope with that, we insert an epilogue marker here,
  // then replace it with a 'nop' if it ends up immediately after a CALL in the
  // final emitted code.
  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  // Add the return addr area delta back since we are not tail calling.
  int Offset = -1 * X86FI->getTCReturnAddrDelta();
  assert(Offset >= 0 && "TCDelta should never be positive");
  if (Offset) {
    MBBI = MBB.getFirstTerminator();

    // Check for possible merge with preceding ADD instruction.
    Offset += mergeSPUpdates(MBB, MBBI, true);
    emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
  }
}

// NOTE: this only has a subset of the full frame index logic. In
// particular, the FI < 0 and AfterFPPop logic is handled in
// X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
// (probably?) it should be moved into here.
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (TRI->hasBasePointer(MF))
    FrameReg = TRI->getBaseRegister();
  else if (TRI->needsStackRealignment(MF))
    FrameReg = TRI->getStackRegister();
  else
    FrameReg = TRI->getFrameRegister(MF);

  // Offset will hold the offset from the stack pointer at function entry to the
  // object.
  // We need to factor in additional offsets applied during the prologue to the
  // frame, base, and stack pointer depending on which is used.
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI->getStackSize();
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  if (IsWin64Prologue) {
    assert(!MFI->hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (FI && FI == X86FI->getFAIndex())
      return -SEHFrameOffset;

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }
1676 if (TRI->hasBasePointer(MF)) {
1677 assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
1679 // Skip the saved EBP.
1680 return Offset + SlotSize + FPDelta;
1682 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1683 return Offset + StackSize;
1685 } else if (TRI->needsStackRealignment(MF)) {
1687 // Skip the saved EBP.
1688 return Offset + SlotSize + FPDelta;
1690 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1691 return Offset + StackSize;
1693 // FIXME: Support tail calls
1696 return Offset + StackSize;
1698 // Skip the saved EBP.
1701 // Skip the RETADDR move area
1702 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1703 if (TailCallReturnAddrDelta < 0)
1704 Offset -= TailCallReturnAddrDelta;
1707 return Offset + FPDelta;
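// Worked example (hypothetical numbers): for an object at ObjectOffset -24 on
// x86-64, getOffsetOfLocalArea() is -8, so Offset = -24 - (-8) = -16. With a
// frame pointer and FPDelta == 0, skipping the saved RBP yields -16 + 8 = -8,
// i.e. the object is addressed as -8(%rbp).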
// Simplified from getFrameIndexReference, keeping only the StackPointer cases.
int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
                                                   int FI,
                                                   unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // Does not include any dynamic realign.
  const uint64_t StackSize = MFI->getStackSize();

  // LLVM arranges the stack as follows:
  //   ...
  //   ARG2
  //   ARG1
  //   RETADDR
  //   PUSH RBP   <-- RBP points here
  //   PUSH CSRs
  //   ~~~~~~~    <-- possible stack realignment (non-win64)
  //   ...
  //   STACK OBJECTS
  //   ...        <-- RSP after prologue points here
  //   ~~~~~~~    <-- possible stack realignment (win64)
  //
  // if (hasVarSizedObjects()):
  //   ...        <-- "base pointer" (ESI/RBX) points here
  //   DYNAMIC ALLOCAS
  //   ...        <-- RSP points here
  //
  // Case 1: In the simple case of no stack realignment and no dynamic
  // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
  // with fixed offsets from RSP.
  //
  // Case 2: In the case of stack realignment with no dynamic allocas, fixed
  // stack objects are addressed with RBP, and regular stack objects with RSP.
  //
  // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
  // to address stack arguments for outgoing calls and nothing else. The "base
  // pointer" points to local variables, and RBP points to fixed objects.
  //
  // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
  // answer we give is relative to the SP after the prologue, and not the
  // SP in the middle of the function.

  assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
          STI.isTargetWin64()) &&
         "offset from fixed object to SP is not static");

  // We don't handle tail calls, and shouldn't be seeing them either.
  int TailCallReturnAddrDelta =
      MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
  assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");

  // Fill in the FrameReg output argument.
  FrameReg = TRI->getStackRegister();

  // This is how the math works out:
  //
  //   %rsp grows (i.e. gets lower) left to right. Each box below is
  //   one word (eight bytes). Obj0 is the stack slot we're trying to
  //   get to.
  //
  //     ----------------------------------
  //     | BP | Obj0 | Obj1 | ... | ObjN |
  //     ----------------------------------
  //     ^    ^      ^                   ^
  //     A    B      C                   E
  //
  //   A is the incoming stack pointer.
  //   (B - A) is the local area offset (-8 for x86-64). [1]
  //   (C - A) is the Offset returned by MFI->getObjectOffset for Obj0. [2]
  //
  //   |(E - B)| is the StackSize (absolute value, positive). For a
  //   stack that grows down, this works out to be (B - E). [3]
  //
  //   E is also the value of %rsp after the stack has been set up, and we
  //   want (C - E) -- the value we can add to %rsp to get to Obj0. Now
  //     (C - E) == (C - A) - (B - A) + (B - E)
  //             { using [1], [2] and [3] above }
  //             == getObjectOffset - LocalAreaOffset + StackSize

  // Get the Offset from the StackPointer.
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();

  return Offset + StackSize;
}
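// Worked example (hypothetical numbers): with StackSize = 40, an object at
// getObjectOffset = -16, and a LocalAreaOffset of -8, the formula gives
// -16 - (-8) + 40 = 32, so the object lives at 32(%rsp) once the prologue
// has executed.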
bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  unsigned CalleeSavedFrameSize = 0;
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  if (hasFP(MF)) {
    // emitPrologue always spills the frame register first.
    SpillSlotOffset -= SlotSize;
    MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // Since emitPrologue and emitEpilogue will handle spilling and restoring
    // of the frame register, we can delete it from the CSI list and not have
    // to worry about avoiding it later.
    unsigned FPReg = TRI->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases the frame size.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    // Ensure alignment.
    SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
    // Spill into the slot.
    SpillSlotOffset -= RC->getSize();
    int SlotIndex =
        MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI->ensureMaxAlignment(RC->getAlignment());
  }

  return true;
}
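// Illustrative layout (hypothetical CSR set): on x86-64 with RBP, RBX, and
// XMM6 to save, the slots assigned above land at -16 (RBP), -24 (RBX, so
// CalleeSavedFrameSize = 8), and -48 (XMM6, after rounding -24 down by
// abs(-24) % 16 = 8 to -32 and then subtracting its 16-byte size).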
bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MI);

  // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
  // for us, and there are no XMM CSRs on Win32.
  if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
    return true;

  // Push GPRs. It increases the frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);

    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Spill XMM regs. X86 has no push/pop instructions for XMM registers, so
  // store them to the stack frame instead.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}
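// For example (illustrative only): with CSI = {RBX, R14, R15}, the GPR loop
// above walks the list backwards and emits
//   push %r15
//   push %r14
//   push %rbx
// with each push flagged as FrameSetup for the unwinder.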
bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
    // Don't restore CSRs in 32-bit EH funclets. Matches
    // spillCalleeSavedRegisters.
    if (STI.is32Bit())
      return true;
    // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
    // funclets. emitEpilogue transforms these to normal jumps.
    if (MI->getOpcode() == X86::CATCHRET) {
      const Function *Func = MBB.getParent()->getFunction();
      bool IsSEH = isAsynchronousEHPersonality(
          classifyEHPersonality(Func->getPersonalityFn()));
      if (IsSEH)
        return true;
    }
  }

  DebugLoc DL = MBB.findDebugLoc(MI);

  // Reload XMMs from the stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }
  return true;
}
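// For example (illustrative only): mirroring the spill order above, CSI =
// {RBX, R14, R15} is walked forwards here, emitting
//   pop %rbx
//   pop %r14
//   pop %r15
// so the last register pushed is the first one popped.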
void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  MachineFrameInfo *MFI = MF.getFrameInfo();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (TRI->hasBasePointer(MF)) {
    SavedRegs.set(TRI->getBaseRegister());

    // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
    if (MF.getMMI().hasEHFunclets()) {
      int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
      X86FI->setHasSEHFramePtrSave(true);
      X86FI->setSEHFramePtrSaveIndex(FI);
    }
  }
}
static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}
/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function, either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF,
                   bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit) {
    if (IsLP64)
      return Primary ? X86::R11 : X86::R12;
    else
      return Primary ? X86::R11D : X86::R12D;
  }

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;
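// Illustrative check (hypothetical frame size): on x86-64 Linux, a 128-byte
// frame is below kSplitStackAvailable, so the comparison collapses to
//   cmp %fs:0x70, %rsp
//   ja  <function body>
// while a larger frame first computes 'lea -StackSize(%rsp)' into a scratch
// register and compares that against the stacklet limit instead.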
void X86FrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  uint64_t StackSize;
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // Do not generate a prologue for functions with a stack of size zero.
  if (StackSize == 0)
    return;
  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64-bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.

  for (const auto &LI : PrologueMBB.liveins()) {
    allocMBB->addLiveIn(LI);
    checkMBB->addLiveIn(LI);
  }

  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);
  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x20; // use tls_tcb.tcb_segstack
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x10; // use tls_tcb.tcb_segstack
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {
      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }
  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack.

    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);

    BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
      .addImm(X86FI->getArgumentStackSize());
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }
  // __morestack is in libgcc.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // Under the large code model, we cannot assume that __morestack lives
    // within 2^31 bytes of the call site, so we cannot use pc-relative
    // addressing. We cannot perform the call via a temporary register,
    // as the rax register may be used to store the static chain, and all
    // other suitable registers may be either callee-save or used for
    // parameter passing. We cannot use the stack at this point either
    // because __morestack manipulates the stack directly.
    //
    // To avoid these issues, perform an indirect call via a read-only memory
    // location containing the address.
    //
    // This solution is not perfect, as it assumes that the .rodata section
    // is laid out within 2^31 bytes of each function body, but this seems
    // to be sufficient for JIT.
    BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addExternalSymbol("__morestack_addr")
        .addReg(0);
    MF.getMMI().setUsesMorestackAddr(true);
  } else {
    if (Is64Bit)
      BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack");
    else
      BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("__morestack");
  }

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&PrologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&PrologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}
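// Putting it together (illustrative, x86-64 Linux, LP64, non-nested, large
// frame), the blocks built above correspond roughly to:
//   checkMBB:  lea  -StackSize(%rsp), %r11
//              cmp  %fs:0x70, %r11
//              ja   <function body>
//   allocMBB:  mov  $StackSize, %r10
//              mov  $ArgumentStackSize, %r11
//              callq __morestack
//              retq                     # MORESTACK_RET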
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of a hybrid stack/heap architecture.
/// (For more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  DebugLoc DL;
  // HiPE-specific values.
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");
  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get the callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0"), as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }
  // If the stack frame needed is larger than the guaranteed one, then runtime
  // checks and calls to the "inc_stack_0" BIF should be inserted in the
  // assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (const auto &LI : PrologueMBB.liveins()) {
      stackCheckMBB->addLiveIn(LI);
      incStackMBB->addLiveIn(LI);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create the new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);

    // Create the new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
    stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
    incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
    incStackMBB->addSuccessor(incStackMBB, {1, 100});
  }
#ifdef XDEBUG
  MF.verify();
#endif
}
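// Illustrative expansion (hypothetical MaxStack): for a 64-bit function with
// MaxStack = 512 (> Guaranteed = 24 * 8 = 192), the blocks above emit roughly
//   stackCheckMBB: lea  -512(%rsp), %r14
//                  cmp  0x90(%rbp), %r14
//                  jae  <function body>
//   incStackMBB:   call inc_stack_0
//                  lea  -512(%rsp), %r14
//                  cmp  0x90(%rbp), %r14
//                  jle  incStackMBB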
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           DebugLoc DL, int Offset) const {
  if (Offset <= 0)
    return false;

  if (Offset % SlotSize)
    return false;

  int NumPops = Offset / SlotSize;
  // This is only worth it if we have at most 2 pops.
  if (NumPops != 1 && NumPops != 2)
    return false;

  // Handle only the trivial case where the adjustment directly follows
  // a call. This is the most common one, anyway.
  if (MBBI == MBB.begin())
    return false;
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
    return false;

  unsigned Regs[2];
  unsigned FoundRegs = 0;

  auto RegMask = Prev->getOperand(1);

  auto &RegClass =
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  // Try to find up to NumPops free registers.
  for (auto Candidate : RegClass) {
    // Poor man's liveness:
    // Since we're immediately after a call, any register that is clobbered
    // by the call and not defined by it can be considered dead.
    if (!RegMask.clobbersPhysReg(Candidate))
      continue;

    bool IsDef = false;
    for (const MachineOperand &MO : Prev->implicit_operands()) {
      if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
        IsDef = true;
        break;
      }
    }

    if (IsDef)
      continue;

    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
      break;
  }

  if (FoundRegs == 0)
    return false;

  // If we found only one free register, but need two, reuse the same one
  // twice.
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];

  for (int i = 0; i < NumPops; ++i)
    BuildMI(MBB, MBBI, DL,
            TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);

  return true;
}
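// For example (illustrative only): when optimizing for minimum size, a
// post-call 'add $8, %esp' on 32-bit becomes a single 'pop %ecx' if ECX is
// clobbered by the call but not defined by it: one byte instead of three,
// at the cost of a dead load.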
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = getStackAlignment();
    Amount = RoundUpToAlignment(Amount, StackAlign);

    MachineModuleInfo &MMI = MF.getMMI();
    const Function *Fn = MF.getFunction();
    bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    bool DwarfCFI = !WindowsCFI &&
                    (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());

    // If we have any exception handlers in this function, and we adjust
    // the SP before calls, we may need to indicate this to the unwinder
    // using GNU_ARGS_SIZE. Note that this may be necessary even when
    // Amount == 0, because the preceding function may have set a non-0
    // GNU_ARGS_SIZE.
    // TODO: We don't need to reset this between subsequent functions,
    // if it didn't change.
    bool HasDwarfEHHandlers = !WindowsCFI &&
                              !MF.getMMI().getLandingPads().empty();

    if (HasDwarfEHHandlers && !isDestroy &&
        MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
      BuildCFI(MBB, I, DL,
               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));

    if (Amount == 0)
      return;

    // Factor out the amount that gets handled inside the sequence
    // (pushes of arguments for frame setup, callee pops for frame destroy).
    Amount -= InternalAmt;

    // TODO: This is needed only if we require precise CFA.
    // If this is a callee-pop calling convention, emit a CFA adjust for
    // the amount the callee popped.
    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
      BuildCFI(MBB, I, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));

    if (Amount) {
      // Add Amount to SP to destroy a frame, and subtract to set one up.
      int Offset = isDestroy ? Amount : -Amount;

      if (!(Fn->optForMinSize() &&
            adjustStackWithPops(MBB, I, DL, Offset)))
        BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
    }

    if (DwarfCFI && !hasFP(MF)) {
      // If we don't have an FP, but need to generate unwind information,
      // we need to set the correct CFA offset after the stack adjustment.
      // How much we adjust the CFA offset depends on whether we're emitting
      // CFI only for EH purposes or for debugging. EH only requires the CFA
      // offset to be correct at each call site, while for debugging we want
      // it to be more precise.
      int CFAOffset = Amount;
      // TODO: When not using precise CFA, we also need to adjust for the
      // InternalAmt here.
      if (CFAOffset) {
        CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
        BuildCFI(MBB, I, DL,
                 MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
      }
    }

    return;
  }

  if (isDestroy && InternalAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
  }
}
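// For example (illustrative only): without a reserved call frame, the pair
//   ADJCALLSTACKDOWN64 16, 0 ... CALL ... ADJCALLSTACKUP64 16, 0
// lowers to 'sub $16, %rsp' before the call and 'add $16, %rsp' after it,
// with both amounts first rounded up to the stack alignment.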
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  assert(MBB.getParent() && "Block is not attached to a function!");

  // Win64 has strict requirements in terms of epilogues, and we are
  // not taking a chance at messing with them.
  // I.e., unless this block is already an exit block, we can't use
  // it as an epilogue.
  if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
    return false;

  if (canUseLEAForSPInEpilogue(*MBB.getParent()))
    return true;

  // If we cannot use LEA to adjust SP, we may need to use an ADD, which
  // clobbers EFLAGS. Check that we do not need to preserve it; otherwise,
  // conservatively assume it is not safe to insert the epilogue here.
  return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
}
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  // If we may need to emit frameless compact unwind information, give
  // up as this is currently broken: PR25614.
  return MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF);
}
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    DebugLoc DL, bool RestoreSP) const {
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
  assert(STI.is32Bit() && !Uses64BitFramePtr &&
         "restoring EBP/ESI on non-32-bit target");

  MachineFunction &MF = *MBB.getParent();
  unsigned FramePtr = TRI->getFrameRegister(MF);
  unsigned BasePtr = TRI->getBaseRegister();
  WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Don't set the FrameSetup flag in the catchret case.

  int FI = FuncInfo.EHRegNodeFrameIndex;
  int EHRegSize = MFI->getObjectSize(FI);

  if (RestoreSP) {
    // MOV32rm -EHRegSize(%ebp), %esp
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
                 X86::EBP, true, -EHRegSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  unsigned UsedReg;
  int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
  int EndOffset = -EHRegOffset - EHRegSize;
  FuncInfo.EHRegNodeEndOffset = EndOffset;

  if (UsedReg == FramePtr) {
    // ADD $offset, %ebp
    unsigned ADDri = getADDriOpcode(false, EndOffset);
    BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
        .addReg(FramePtr)
        .addImm(EndOffset)
        .setMIFlag(MachineInstr::FrameSetup)
        ->getOperand(3)
        .setIsDead();
    assert(EndOffset >= 0 &&
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    // LEA offset(%ebp), %esi
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
                 FramePtr, false, EndOffset)
        .setMIFlag(MachineInstr::FrameSetup);
    // MOV32rm SavedEBPOffset(%esi), %ebp
    assert(X86FI->getHasSEHFramePtrSave());
    int Offset =
        getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
    assert(UsedReg == BasePtr);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
                 UsedReg, true, Offset)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
  }
  return MBBI;
}
unsigned
X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
  // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
  unsigned Offset = 16;
  // RBP is immediately pushed.
  Offset += SlotSize;
  // All callee-saved registers are then pushed.
  Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
  // Every funclet allocates enough stack space for the largest outgoing call.
  Offset += getWinEHFuncletFrameSize(MF);
  return Offset;
}
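// Worked example (hypothetical numbers): with a 16-byte callee-saved area and
// a 32-byte funclet frame, this returns 16 (RDX home slot) + 8 (pushed RBP)
// + 16 + 32 = 72.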
void X86FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  const Function *Fn = MF.getFunction();
  if (!STI.is64Bit() || !MF.getMMI().hasEHFunclets() ||
      classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
    return;

  // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
  // relative to RSP after the prologue. Find the offset of the last fixed
  // object, so that we can allocate a slot immediately following it. If there
  // were no fixed objects, use offset -SlotSize, which is immediately after
  // the return address. Fixed objects have negative frame indices.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int64_t MinFixedObjOffset = -SlotSize;
  for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));

  int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
  int UnwindHelpFI =
      MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
  MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;

  // Store -2 into UnwindHelp on function entry. We have to scan forwards past
  // other frame setup instructions.
  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  DebugLoc DL = MBB.findDebugLoc(MBBI);
  addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),