//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   unsigned StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}
// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo()->hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
          MFI->hasStackMap() || MFI->hasPatchPoint());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64)
    return isInt<8>(Imm) ? X86::SUB64ri8 : X86::SUB64ri32;
  return isInt<8>(Imm) ? X86::SUB32ri8 : X86::SUB32ri;
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64)
    return isInt<8>(Imm) ? X86::ADD64ri8 : X86::ADD64ri32;
  return isInt<8>(Imm) ? X86::ADD32ri8 : X86::ADD32ri;
}

static unsigned getSUBrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64)
    return isInt<8>(Imm) ? X86::AND64ri8 : X86::AND64ri32;
  return isInt<8>(Imm) ? X86::AND32ri8 : X86::AND32ri;
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
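
// For illustration: getSUBriOpcode(/*IsLP64=*/true, 8) picks SUB64ri8 because
// the immediate fits in a signed byte, while getSUBriOpcode(true, 4096) picks
// SUB64ri32. Preferring the 8-bit immediate forms keeps prologue and epilogue
// encodings compact.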
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worry about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const X86RegisterInfo *TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP)
        return CS;
  }
  }

  return 0;
}
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;
    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }
  return false;
}
/// Check whether or not the terminators of \p MBB need to read EFLAGS.
static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;
      // This terminator needs an eflag that is not defined
      // by a previous terminator.
      if (!MO.isDef())
        return true;
      BreakNext = true;
    }
    if (BreakNext)
      break;
  }
  return false;
}
230 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
231 /// stack pointer by a constant value.
232 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
233 MachineBasicBlock::iterator &MBBI,
234 int64_t NumBytes, bool InEpilogue) const {
235 bool isSub = NumBytes < 0;
236 uint64_t Offset = isSub ? -NumBytes : NumBytes;
238 uint64_t Chunk = (1LL << 31) - 1;
239 DebugLoc DL = MBB.findDebugLoc(MBBI);
242 if (Offset > Chunk) {
243 // Rather than emit a long series of instructions for large offsets,
244 // load the offset into a register and do one sub/add
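    // Illustrative example (the scratch register is chosen below): an 8 GiB
    // adjustment in the prologue then becomes roughly
    //   movabsq $0x200000000, %rax
    //   subq    %rax, %rsp
    // rather than a chain of chunked 2 GiB SUB instructions.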
247 if (isSub && !isEAXLiveIn(*MBB.getParent()))
248 Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
250 Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
253 unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
254 BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
257 ? getSUBrrOpcode(Is64Bit)
258 : getADDrrOpcode(Is64Bit);
259 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
262 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
268 uint64_t ThisVal = std::min(Offset, Chunk);
269 if (ThisVal == (Is64Bit ? 8 : 4)) {
270 // Use push / pop instead.
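      // E.g. (illustrative) on x86-64, "subq $8, %rsp" during frame setup can
      // be emitted as "pushq %rax", and the matching epilogue adjustment as
      // "popq %rcx"; the value pushed or popped is irrelevant, only the
      // stack-pointer change matters.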
272 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
273 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
276 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
277 : (Is64Bit ? X86::POP64r : X86::POP32r);
278 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
279 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
281 MI->setFlag(MachineInstr::FrameSetup);
283 MI->setFlag(MachineInstr::FrameDestroy);
289 MachineInstrBuilder MI = BuildStackAdjustment(
290 MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
292 MI.setMIFlag(MachineInstr::FrameSetup);
294 MI.setMIFlag(MachineInstr::FrameDestroy);
300 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
301 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
302 int64_t Offset, bool InEpilogue) const {
303 assert(Offset != 0 && "zero offset stack adjustment requested");
305 // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
309 UseLEA = STI.useLeaForSP();
311 // If we can use LEA for SP but we shouldn't, check that none
312 // of the terminators uses the eflags. Otherwise we will insert
313 // a ADD that will redefine the eflags and break the condition.
314 // Alternatively, we could move the ADD, but this may not be possible
315 // and is an optimization anyway.
316 UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
317 if (UseLEA && !STI.useLeaForSP())
318 UseLEA = terminatorsNeedFlagsAsInput(MBB);
319 // If that assert breaks, that means we do not do the right thing
320 // in canUseAsEpilogue.
321 assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
322 "We shouldn't have allowed this insertion point");
325 MachineInstrBuilder MI;
327 MI = addRegOffset(BuildMI(MBB, MBBI, DL,
328 TII.get(getLEArOpcode(Uses64BitFramePtr)),
330 StackPtr, false, Offset);
332 bool IsSub = Offset < 0;
333 uint64_t AbsOffset = IsSub ? -Offset : Offset;
334 unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
335 : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
336 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
339 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
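
// mergeSPUpdates: if the instruction adjacent to MBBI is already an
// ADD/SUB/LEA on the stack pointer, its amount is folded into the caller's
// pending adjustment and the instruction is removed. Illustrative example: a
// pending 32-byte allocation preceded by "subq $16, %rsp" collapses into a
// single "subq $48, %rsp" in the prologue.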
344 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
345 MachineBasicBlock::iterator &MBBI,
346 bool doMergeWithPrevious) const {
347 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
348 (!doMergeWithPrevious && MBBI == MBB.end()))
351 MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
354 unsigned Opc = PI->getOpcode();
357 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
358 Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
359 Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
360 PI->getOperand(0).getReg() == StackPtr){
361 Offset += PI->getOperand(2).getImm();
363 if (!doMergeWithPrevious) MBBI = NI;
364 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
365 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
366 PI->getOperand(0).getReg() == StackPtr) {
367 Offset -= PI->getOperand(2).getImm();
369 if (!doMergeWithPrevious) MBBI = NI;
375 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
376 MachineBasicBlock::iterator MBBI, DebugLoc DL,
377 MCCFIInstruction CFIInst) const {
378 MachineFunction &MF = *MBB.getParent();
379 unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
380 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
381 .addCFIIndex(CFIIndex);
385 X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
386 MachineBasicBlock::iterator MBBI,
388 MachineFunction &MF = *MBB.getParent();
389 MachineFrameInfo *MFI = MF.getFrameInfo();
390 MachineModuleInfo &MMI = MF.getMMI();
391 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
393 // Add callee saved registers to move list.
394 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
395 if (CSI.empty()) return;
397 // Calculate offsets.
398 for (std::vector<CalleeSavedInfo>::const_iterator
399 I = CSI.begin(), E = CSI.end(); I != E; ++I) {
400 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
401 unsigned Reg = I->getReg();
403 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
404 BuildCFI(MBB, MBBI, DL,
405 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
409 /// usesTheStack - This function checks if any of the users of EFLAGS
410 /// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
414 static bool usesTheStack(const MachineFunction &MF) {
415 const MachineRegisterInfo &MRI = MF.getRegInfo();
417 for (MachineRegisterInfo::reg_instr_iterator
418 ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
426 MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
427 MachineBasicBlock &MBB,
428 MachineBasicBlock::iterator MBBI,
430 bool InProlog) const {
431 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
432 if (STI.isTargetWindowsCoreCLR()) {
434 return emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
436 return emitStackProbeInline(MF, MBB, MBBI, DL, false);
439 return emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
443 void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
444 MachineBasicBlock &PrologMBB) const {
445 const StringRef ChkStkStubSymbol = "__chkstk_stub";
446 MachineInstr *ChkStkStub = nullptr;
448 for (MachineInstr &MI : PrologMBB) {
449 if (MI.isCall() && MI.getOperand(0).isSymbol() &&
450 ChkStkStubSymbol == MI.getOperand(0).getSymbolName()) {
456 if (ChkStkStub != nullptr) {
457 MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
458 assert(std::prev(MBBI).operator==(ChkStkStub) &&
459 "MBBI expected after __chkstk_stub.");
460 DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
461 emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
462 ChkStkStub->eraseFromParent();
466 MachineInstr *X86FrameLowering::emitStackProbeInline(
467 MachineFunction &MF, MachineBasicBlock &MBB,
468 MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
469 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
470 assert(STI.is64Bit() && "different expansion needed for 32 bit");
471 assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
472 const TargetInstrInfo &TII = *STI.getInstrInfo();
473 const BasicBlock *LLVM_BB = MBB.getBasicBlock();
475 // RAX contains the number of bytes of desired stack adjustment.
476 // The handling here assumes this value has already been updated so as to
477 // maintain stack alignment.
479 // We need to exit with RSP modified by this amount and execute suitable
480 // page touches to notify the OS that we're growing the stack responsibly.
481 // All stack probing must be done without modifying RSP.
487 // Flags, TestReg = CopyReg - SizeReg
488 // FinalReg = !Flags.Ovf ? TestReg : ZeroReg
489 // LimitReg = gs magic thread env access
490 // if FinalReg >= LimitReg goto ContinueMBB
492 // RoundReg = page address of FinalReg
494 // LoopReg = PHI(LimitReg,ProbeReg)
495 // ProbeReg = LoopReg - PageSize
497 // if (ProbeReg > RoundReg) goto LoopMBB
500 // [rest of original MBB]
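  //
  // For the in-prolog case, the sequence above comes out roughly as
  // (illustrative AT&T syntax):
  //   xorq   %rcx, %rcx
  //   movq   %rsp, %rdx
  //   subq   %rax, %rdx
  //   cmovbq %rcx, %rdx          ; clamp to zero if the SUB wrapped
  //   movq   %gs:0x10, %rcx      ; thread environment stack limit
  //   cmpq   %rcx, %rdx
  //   jae    <continue block>
  // followed by the page-rounding AND and the page-by-page probe loop.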
502 // Set up the new basic blocks
503 MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
504 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
505 MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);
507 MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
508 MF.insert(MBBIter, RoundMBB);
509 MF.insert(MBBIter, LoopMBB);
510 MF.insert(MBBIter, ContinueMBB);
512 // Split MBB and move the tail portion down to ContinueMBB.
513 MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
514 ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
515 ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);
517 // Some useful constants
518 const int64_t ThreadEnvironmentStackLimit = 0x10;
519 const int64_t PageSize = 0x1000;
520 const int64_t PageMask = ~(PageSize - 1);
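  // E.g. with 4 KiB pages, PageMask is 0xFFFFFFFFFFFFF000, so masking the
  // target stack pointer with it below rounds the value down to its page base.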
522 // Registers we need. For the normal case we use virtual
523 // registers. For the prolog expansion we use RAX, RCX and RDX.
524 MachineRegisterInfo &MRI = MF.getRegInfo();
525 const TargetRegisterClass *RegClass = &X86::GR64RegClass;
526 const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
527 : MRI.createVirtualRegister(RegClass),
528 ZeroReg = InProlog ? (unsigned)X86::RCX
529 : MRI.createVirtualRegister(RegClass),
530 CopyReg = InProlog ? (unsigned)X86::RDX
531 : MRI.createVirtualRegister(RegClass),
532 TestReg = InProlog ? (unsigned)X86::RDX
533 : MRI.createVirtualRegister(RegClass),
534 FinalReg = InProlog ? (unsigned)X86::RDX
535 : MRI.createVirtualRegister(RegClass),
536 RoundedReg = InProlog ? (unsigned)X86::RDX
537 : MRI.createVirtualRegister(RegClass),
538 LimitReg = InProlog ? (unsigned)X86::RCX
539 : MRI.createVirtualRegister(RegClass),
540 JoinReg = InProlog ? (unsigned)X86::RCX
541 : MRI.createVirtualRegister(RegClass),
542 ProbeReg = InProlog ? (unsigned)X86::RCX
543 : MRI.createVirtualRegister(RegClass);
545 // SP-relative offsets where we can save RCX and RDX.
546 int64_t RCXShadowSlot = 0;
547 int64_t RDXShadowSlot = 0;
549 // If inlining in the prolog, save RCX and RDX.
550 // Future optimization: don't save or restore if not live in.
552 // Compute the offsets. We need to account for things already
553 // pushed onto the stack at this point: return address, frame
554 // pointer (if used), and callee saves.
555 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
556 const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
557 const bool HasFP = hasFP(MF);
558 RCXShadowSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
559 RDXShadowSlot = RCXShadowSlot + 8;
561 addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
564 addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
568 // Not in the prolog. Copy RAX to a virtual reg.
569 BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
572 // Add code to MBB to check for overflow and set the new target stack pointer
574 BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
575 .addReg(ZeroReg, RegState::Undef)
576 .addReg(ZeroReg, RegState::Undef);
577 BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
578 BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
581 BuildMI(&MBB, DL, TII.get(X86::CMOVB64rr), FinalReg)
585 // FinalReg now holds final stack pointer value, or zero if
586 // allocation would overflow. Compare against the current stack
587 // limit from the thread environment block. Note this limit is the
588 // lowest touched page on the stack, not the point at which the OS
589 // will cause an overflow exception, so this is just an optimization
590 // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
592 BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
596 .addImm(ThreadEnvironmentStackLimit)
598 BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
599 // Jump if the desired stack pointer is at or above the stack limit.
600 BuildMI(&MBB, DL, TII.get(X86::JAE_1)).addMBB(ContinueMBB);
602 // Add code to roundMBB to round the final stack pointer to a page boundary.
603 BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
606 BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);
608 // LimitReg now holds the current stack limit, RoundedReg page-rounded
609 // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
610 // and probe until we reach RoundedReg.
612 BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
619 addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
622 // Probe by storing a byte onto the stack.
623 BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
630 BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
633 BuildMI(LoopMBB, DL, TII.get(X86::JNE_1)).addMBB(LoopMBB);
635 MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();
637 // If in prolog, restore RDX and RCX.
639 addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
641 X86::RSP, false, RCXShadowSlot);
642 addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
644 X86::RSP, false, RDXShadowSlot);
647 // Now that the probing is done, add code to continueMBB to update
648 // the stack pointer for real.
649 BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
653 // Add the control flow edges we need.
654 MBB.addSuccessor(ContinueMBB);
655 MBB.addSuccessor(RoundMBB);
656 RoundMBB->addSuccessor(LoopMBB);
657 LoopMBB->addSuccessor(ContinueMBB);
658 LoopMBB->addSuccessor(LoopMBB);
660 // Mark all the instructions added to the prolog as frame setup.
662 for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
663 BeforeMBBI->setFlag(MachineInstr::FrameSetup);
665 for (MachineInstr &MI : *RoundMBB) {
666 MI.setFlag(MachineInstr::FrameSetup);
668 for (MachineInstr &MI : *LoopMBB) {
669 MI.setFlag(MachineInstr::FrameSetup);
671 for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
672 CMBBI != ContinueMBBI; ++CMBBI) {
673 CMBBI->setFlag(MachineInstr::FrameSetup);
677 // Possible TODO: physreg liveness for InProlog case.
682 MachineInstr *X86FrameLowering::emitStackProbeCall(
683 MachineFunction &MF, MachineBasicBlock &MBB,
684 MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
685 bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
689 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
691 CallOp = X86::CALLpcrel32;
  const char *Symbol;
  if (Is64Bit)
    Symbol = STI.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  else
    Symbol = STI.isTargetCygMing() ? "_alloca" : "_chkstk";
705 MachineInstrBuilder CI;
706 MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);
708 // All current stack probes take AX and SP as input, clobber flags, and
709 // preserve all registers. x86_64 probes leave RSP unmodified.
710 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
711 // For the large code model, we have to call through a register. Use R11,
712 // as it is scratch in all supported calling conventions.
713 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
714 .addExternalSymbol(Symbol);
715 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
717 CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
720 unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
721 unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
722 CI.addReg(AX, RegState::Implicit)
723 .addReg(SP, RegState::Implicit)
724 .addReg(AX, RegState::Define | RegState::Implicit)
725 .addReg(SP, RegState::Define | RegState::Implicit)
726 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
732 BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
738 // Apply the frame setup flag to all inserted instrs.
739 for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
740 ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
746 MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
747 MachineFunction &MF, MachineBasicBlock &MBB,
748 MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
750 assert(InProlog && "ChkStkStub called outside prolog!");
752 BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
753 .addExternalSymbol("__chkstk_stub");
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}
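
// For example, an SPAdjust of 100 yields min(100, 128) & -16 == 96, while any
// adjustment of 128 bytes or more yields exactly 128.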
767 // If we're forcing a stack realignment we can't rely on just the frame
768 // info, we need to know the ABI stack alignment as well in case we
769 // have a call out. Otherwise just make sure we have some alignment - we'll
770 // go with the minimum SlotSize.
uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  unsigned StackAlign = getStackAlignment();
  if (MF.getFunction()->hasFnAttribute("stackrealign")) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }
  return MaxAlign;
}
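
// Example: a function whose most-aligned local is a 64-byte-aligned alloca
// reports MaxAlignment 64, so MaxAlign is 64 here; with the "stackrealign"
// attribute and outgoing calls, MaxAlign is also raised to at least the ABI
// stack alignment (16 on x86-64).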
784 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
785 MachineBasicBlock::iterator MBBI,
786 DebugLoc DL, unsigned Reg,
787 uint64_t MaxAlign) const {
788 uint64_t Val = -MaxAlign;
789 unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                         .addReg(Reg)
                         .addImm(Val)
                         .setMIFlag(MachineInstr::FrameSetup);

  // The EFLAGS implicit def is dead.
  MI->getOperand(3).setIsDead();
}
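
// Illustrative result: with MaxAlign == 32 this emits "andq $-32, %rsp" (or
// "andl $-32, %esp" on 32-bit targets), clearing the low five bits so the
// register is 32-byte aligned.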
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
/// generate the exception handling frames.

/*
  Here's a gist of what gets emitted:
807 ; Establish frame pointer, if needed
810 .cfi_def_cfa_offset 16
811 .cfi_offset %rbp, -16
814 .cfi_def_cfa_register %rbp
816 ; Spill general-purpose registers
817 [for all callee-saved GPRs]
820 .cfi_def_cfa_offset (offset from RETADDR)
823 ; If the required stack alignment > default stack alignment
824 ; rsp needs to be re-aligned. This creates a "re-alignment gap"
825 ; of unknown size in the stack frame.
826 [if stack needs re-alignment]
829 ; Allocate space for locals
830 [if target is Windows and allocated space > 4096 bytes]
831 ; Windows needs special care for allocations larger
834 call ___chkstk_ms/___chkstk
840 .seh_stackalloc (size of XMM spill slots)
841 .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
846 ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
847 ; they may get spilled on any platform, if the current function
848 ; calls @llvm.eh.unwind.init
850 [for all callee-saved XMM registers]
851 movaps %<xmm reg>, -MMM(%rbp)
852 [for all callee-saved XMM registers]
853 .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
854 ; i.e. the offset relative to (%rbp - SEHFrameOffset)
856 [for all callee-saved XMM registers]
857 movaps %<xmm reg>, KKK(%rsp)
858 [for all callee-saved XMM registers]
859 .seh_savexmm %<xmm reg>, KKK
863 [if needs base pointer]
865 [if needs to restore base pointer]
870 [for all callee-saved registers]
871 .cfi_offset %<reg>, (offset from %rbp)
873 .cfi_def_cfa_offset (offset from RETADDR)
874 [for all callee-saved registers]
875 .cfi_offset %<reg>, (offset from %rsp)
878 - .seh directives are emitted only for Windows 64 ABI
879 - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/
883 void X86FrameLowering::emitPrologue(MachineFunction &MF,
884 MachineBasicBlock &MBB) const {
885 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
886 "MF used frame lowering for wrong subtarget");
887 MachineBasicBlock::iterator MBBI = MBB.begin();
888 MachineFrameInfo *MFI = MF.getFrameInfo();
889 const Function *Fn = MF.getFunction();
890 MachineModuleInfo &MMI = MF.getMMI();
891 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
892 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
893 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
894 bool IsFunclet = MBB.isEHFuncletEntry();
895 bool FnHasClrFunclet =
896 MMI.hasEHFunclets() &&
897 classifyEHPersonality(Fn->getPersonalityFn()) == EHPersonality::CoreCLR;
898 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
899 bool HasFP = hasFP(MF);
900 bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
901 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
902 bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
904 !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
905 unsigned FramePtr = TRI->getFrameRegister(MF);
906 const unsigned MachineFramePtr =
907 STI.isTarget64BitILP32()
908 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
910 unsigned BasePtr = TRI->getBaseRegister();
912 // Debug location must be unknown since the first debug location is used
913 // to determine the end of the prologue.
916 // Add RETADDR move area to callee saved frame size.
917 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
918 if (TailCallReturnAddrDelta && IsWin64Prologue)
919 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
921 if (TailCallReturnAddrDelta < 0)
922 X86FI->setCalleeSavedFrameSize(
923 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
925 bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
927 // The default stack probe size is 4096 if the function has no stackprobesize
929 unsigned StackProbeSize = 4096;
930 if (Fn->hasFnAttribute("stack-probe-size"))
931 Fn->getFnAttribute("stack-probe-size")
933 .getAsInteger(0, StackProbeSize);
935 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
936 // function, and use up to 128 bytes of stack space, don't have a frame
937 // pointer, calls, or dynamic alloca then we do not need to adjust the
938 // stack pointer (we fit in the Red Zone). We also check that we don't
939 // push and pop from the stack.
940 if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
941 !TRI->needsStackRealignment(MF) &&
942 !MFI->hasVarSizedObjects() && // No dynamic alloca.
943 !MFI->adjustsStack() && // No calls.
944 !IsWin64CC && // Win64 has no Red Zone
945 !usesTheStack(MF) && // Don't push and pop.
946 !MF.shouldSplitStack()) { // Regular stack
947 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
948 if (HasFP) MinSize += SlotSize;
949 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
950 MFI->setStackSize(StackSize);
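    // Example: a 64-bit leaf function with 40 bytes of locals, no frame
    // pointer and no CSRs ends up with StackSize 0 here; its locals live
    // below %rsp in the 128-byte red zone and no stack-pointer adjustment is
    // emitted.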
953 // Insert stack pointer adjustment for later moving of return addr. Only
954 // applies to tail call optimized functions where the callee argument stack
955 // size is bigger than the callers.
956 if (TailCallReturnAddrDelta < 0) {
957 BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
958 /*InEpilogue=*/false)
959 .setMIFlag(MachineInstr::FrameSetup);
962 // Mapping for machine moves:
964 // DST: VirtualFP AND
965 // SRC: VirtualFP => DW_CFA_def_cfa_offset
966 // ELSE => DW_CFA_def_cfa
968 // SRC: VirtualFP AND
969 // DST: Register => DW_CFA_def_cfa_register
972 // OFFSET < 0 => DW_CFA_offset_extended_sf
973 // REG < 64 => DW_CFA_offset + Reg
974 // ELSE => DW_CFA_offset_extended
976 uint64_t NumBytes = 0;
977 int stackGrowth = -SlotSize;
979 // Find the funclet establisher parameter
980 unsigned Establisher = X86::NoRegister;
982 Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
984 Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
987 // Immediately spill establisher into the home slot.
988 // The runtime cares about this.
989 // MOV64mr %rdx, 16(%rsp)
990 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
991 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
993 .setMIFlag(MachineInstr::FrameSetup);
994 MBB.addLiveIn(Establisher);
998 // Calculate required stack adjustment.
999 uint64_t FrameSize = StackSize - SlotSize;
1000 // If required, include space for extra hidden slot for stashing base pointer.
1001 if (X86FI->getRestoreBasePointer())
1002 FrameSize += SlotSize;
1004 NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
1006 // Callee-saved registers are pushed on stack before the stack is realigned.
1007 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1008 NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
1010 // Get the offset of the stack slot for the EBP register, which is
1011 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
1012 // Update the frame offset adjustment.
1014 MFI->setOffsetAdjustment(-NumBytes);
1016 assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
1017 "should calculate same local variable offset for funclets");
1019 // Save EBP/RBP into the appropriate stack slot.
1020 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
1021 .addReg(MachineFramePtr, RegState::Kill)
1022 .setMIFlag(MachineInstr::FrameSetup);
1024 if (NeedsDwarfCFI) {
1025 // Mark the place where EBP/RBP was saved.
1026 // Define the current CFA rule to use the provided offset.
1028 BuildCFI(MBB, MBBI, DL,
1029 MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
1031 // Change the rule for the FramePtr to be an "offset" rule.
1032 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1033 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
1034 nullptr, DwarfFramePtr, 2 * stackGrowth));
1038 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1040 .setMIFlag(MachineInstr::FrameSetup);
1043 if (!IsWin64Prologue && !IsFunclet) {
1044 // Update EBP with the new base value.
1045 BuildMI(MBB, MBBI, DL,
1046 TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
1049 .setMIFlag(MachineInstr::FrameSetup);
1051 if (NeedsDwarfCFI) {
1052 // Mark effective beginning of when frame pointer becomes valid.
1053 // Define the current CFA to use the EBP/RBP register.
1054 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1055 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
1056 nullptr, DwarfFramePtr));
1060 // Mark the FramePtr as live-in in every block. Don't do this again for
1061 // funclet prologues.
1063 for (MachineBasicBlock &EveryMBB : MF)
1064 EveryMBB.addLiveIn(MachineFramePtr);
1067 assert(!IsFunclet && "funclets without FPs not yet implemented");
1068 NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
1071 // For EH funclets, only allocate enough space for outgoing calls. Save the
1072 // NumBytes value that we would've used for the parent frame.
1073 unsigned ParentFrameNumBytes = NumBytes;
1075 NumBytes = getWinEHFuncletFrameSize(MF);
1077 // Skip the callee-saved push instructions.
1078 bool PushedRegs = false;
1079 int StackOffset = 2 * stackGrowth;
1081 while (MBBI != MBB.end() &&
1082 MBBI->getFlag(MachineInstr::FrameSetup) &&
1083 (MBBI->getOpcode() == X86::PUSH32r ||
1084 MBBI->getOpcode() == X86::PUSH64r)) {
1086 unsigned Reg = MBBI->getOperand(0).getReg();
1089 if (!HasFP && NeedsDwarfCFI) {
1090 // Mark callee-saved push instruction.
1091 // Define the current CFA rule to use the provided offset.
1093 BuildCFI(MBB, MBBI, DL,
1094 MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
1095 StackOffset += stackGrowth;
1099 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
1100 MachineInstr::FrameSetup);
1104 // Realign stack after we pushed callee-saved registers (so that we'll be
1105 // able to calculate their offsets from the frame pointer).
1106 // Don't do this for Win64, it needs to realign the stack after the prologue.
1107 if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
1108 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1109 BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1112 // If there is an SUB32ri of ESP immediately before this instruction, merge
1113 // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
1115 NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1117 // Adjust stack pointer: ESP -= numbytes.
1119 // Windows and cygwin/mingw require a prologue helper routine when allocating
1120 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
1121 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
1122 // stack and adjust the stack pointer in one go. The 64-bit version of
1123 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
1124 // responsible for adjusting the stack pointer. Touching the stack at 4K
1125 // increments is necessary to ensure that the guard pages used by the OS
1126 // virtual memory manager are allocated in correct sequence.
1127 uint64_t AlignedNumBytes = NumBytes;
1128 if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
1129 AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
1130 if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
1131 // Check whether EAX is livein for this function.
1132 bool isEAXAlive = isEAXLiveIn(MF);
1135 // Sanity check that EAX is not livein for this function.
1136 // It should not be, so throw an assert.
1137 assert(!Is64Bit && "EAX is livein in x64 case!");
1140 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1141 .addReg(X86::EAX, RegState::Kill)
1142 .setMIFlag(MachineInstr::FrameSetup);
1146 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1147 // Function prologue is responsible for adjusting the stack pointer.
1148 if (isUInt<32>(NumBytes)) {
1149 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1151 .setMIFlag(MachineInstr::FrameSetup);
1152 } else if (isInt<32>(NumBytes)) {
1153 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1155 .setMIFlag(MachineInstr::FrameSetup);
1157 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1159 .setMIFlag(MachineInstr::FrameSetup);
1162 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1163 // We'll also use 4 already allocated bytes for EAX.
1164 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1165 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1166 .setMIFlag(MachineInstr::FrameSetup);
1169 // Call __chkstk, __chkstk_ms, or __alloca.
1170 emitStackProbe(MF, MBB, MBBI, DL, true);
1175 addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1176 StackPtr, false, NumBytes - 4);
1177 MI->setFlag(MachineInstr::FrameSetup);
1178 MBB.insert(MBBI, MI);
1180 } else if (NumBytes) {
1181 emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
1184 if (NeedsWinCFI && NumBytes)
1185 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1187 .setMIFlag(MachineInstr::FrameSetup);
1189 int SEHFrameOffset = 0;
1190 unsigned SPOrEstablisher;
1193 // The establisher parameter passed to a CLR funclet is actually a pointer
1194 // to the (mostly empty) frame of its nearest enclosing funclet; we have
1195 // to find the root function establisher frame by loading the PSPSym from
1196 // the intermediate frame.
1197 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1198 MachinePointerInfo NoInfo;
1199 MBB.addLiveIn(Establisher);
1200 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1201 Establisher, false, PSPSlotOffset)
1202 .addMemOperand(MF.getMachineMemOperand(
1203 NoInfo, MachineMemOperand::MOLoad, SlotSize, SlotSize));
1205 // Save the root establisher back into the current funclet's (mostly
1206 // empty) frame, in case a sub-funclet or the GC needs it.
1207 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1208 false, PSPSlotOffset)
1209 .addReg(Establisher)
1211 MF.getMachineMemOperand(NoInfo, MachineMemOperand::MOStore |
1212 MachineMemOperand::MOVolatile,
1213 SlotSize, SlotSize));
1215 SPOrEstablisher = Establisher;
1217 SPOrEstablisher = StackPtr;
1220 if (IsWin64Prologue && HasFP) {
1221 // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1222 // this calculation on the incoming establisher, which holds the value of
1223 // RSP from the parent frame at the end of the prologue.
1224 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1226 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1227 SPOrEstablisher, false, SEHFrameOffset);
1229 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1230 .addReg(SPOrEstablisher);
1232 // If this is not a funclet, emit the CFI describing our frame pointer.
1233 if (NeedsWinCFI && !IsFunclet)
1234 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1236 .addImm(SEHFrameOffset)
1237 .setMIFlag(MachineInstr::FrameSetup);
1238 } else if (IsFunclet && STI.is32Bit()) {
1239 // Reset EBP / ESI to something good for funclets.
1240 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
1241 // If we're a catch funclet, we can be returned to via catchret. Save ESP
1242 // into the registration node so that the runtime will restore it for us.
1243 if (!MBB.isCleanupFuncletEntry()) {
1244 assert(classifyEHPersonality(Fn->getPersonalityFn()) ==
1245 EHPersonality::MSVC_CXX);
1247 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
1248 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg);
1249 // ESP is the first field, so no extra displacement is needed.
1250 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
1256 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
1257 const MachineInstr *FrameInstr = &*MBBI;
1262 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
1263 if (X86::FR64RegClass.contains(Reg)) {
1264 unsigned IgnoredFrameReg;
1265 int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
1266 Offset += SEHFrameOffset;
1268 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
1271 .setMIFlag(MachineInstr::FrameSetup);
1278 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
1279 .setMIFlag(MachineInstr::FrameSetup);
1281 if (FnHasClrFunclet && !IsFunclet) {
1282 // Save the so-called Initial-SP (i.e. the value of the stack pointer
1283 // immediately after the prolog) into the PSPSlot so that funclets
1284 // and the GC can recover it.
1285 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1286 auto PSPInfo = MachinePointerInfo::getFixedStack(
1287 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
1288 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
1291 .addMemOperand(MF.getMachineMemOperand(
1292 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1293 SlotSize, SlotSize));
1296 // Realign stack after we spilled callee-saved registers (so that we'll be
1297 // able to calculate their offsets from the frame pointer).
1298 // Win64 requires aligning the stack after the prologue.
1299 if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
1300 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1301 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
1304 // We already dealt with stack realignment and funclets above.
1305 if (IsFunclet && STI.is32Bit())
1308 // If we need a base pointer, set it up here. It's whatever the value
1309 // of the stack pointer is at this point. Any variable size objects
1310 // will be allocated after this, so we can still use the base pointer
1311 // to reference locals.
1312 if (TRI->hasBasePointer(MF)) {
1313 // Update the base pointer with the current stack pointer.
1314 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
1315 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
1316 .addReg(SPOrEstablisher)
1317 .setMIFlag(MachineInstr::FrameSetup);
1318 if (X86FI->getRestoreBasePointer()) {
1319 // Stash value of base pointer. Saving RSP instead of EBP shortens
1320 // dependence chain. Used by SjLj EH.
1321 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1322 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1323 FramePtr, true, X86FI->getRestoreBasePointerOffset())
1324 .addReg(SPOrEstablisher)
1325 .setMIFlag(MachineInstr::FrameSetup);
1328 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1329 // Stash the value of the frame pointer relative to the base pointer for
1330 // Win32 EH. This supports Win32 EH, which does the inverse of the above:
1331 // it recovers the frame pointer from the base pointer rather than the
1332 // other way around.
1333 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1336 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
1337 assert(UsedReg == BasePtr);
1338 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1340 .setMIFlag(MachineInstr::FrameSetup);
1344 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1345 // Mark end of stack pointer adjustment.
1346 if (!HasFP && NumBytes) {
1347 // Define the current CFA rule to use the provided offset.
1349 BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
1350 nullptr, -StackSize + stackGrowth));
1353 // Emit DWARF info specifying the offsets of the callee-saved registers.
1355 emitCalleeSavedFrameMoves(MBB, MBBI, DL);
1359 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1360 const MachineFunction &MF) const {
1361 // We can't use LEA instructions for adjusting the stack pointer if this is a
1362 // leaf function in the Win64 ABI. Only ADD instructions may be used to
1363 // deallocate the stack.
1364 // This means that we can use LEA for SP in two situations:
1365 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1366 // 2. We *have* a frame pointer which means we are permitted to use LEA.
1367 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
static bool isFuncletReturnInstr(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return false;
  case X86::CLEANUPRET:
  case X86::CATCHRET: return true;
  }
  llvm_unreachable("impossible");
}
1381 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
1382 // stack. It holds a pointer to the bottom of the root function frame. The
1383 // establisher frame pointer passed to a nested funclet may point to the
1384 // (mostly empty) frame of its parent funclet, but it will need to find
1385 // the frame of the root function to access locals. To facilitate this,
1386 // every funclet copies the pointer to the bottom of the root function
1387 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
1388 // same offset for the PSPSym in the root function frame that's used in the
1389 // funclets' frames allows each funclet to dynamically accept any ancestor
1390 // frame as its establisher argument (the runtime doesn't guarantee the
1391 // immediate parent for some reason lost to history), and also allows the GC,
1392 // which uses the PSPSym for some bookkeeping, to find it in any funclet's
1393 // frame with only a single offset reported for the entire method.
1395 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
1396 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
1397 // getFrameIndexReferenceFromSP has an out ref parameter for the stack
1398 // pointer register; pass a dummy that we ignore
1400 int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
1401 assert(Offset >= 0);
1402 return static_cast<unsigned>(Offset);
1406 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1407 // This is the size of the pushed CSRs.
1409 MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
1410 // This is the amount of stack a funclet needs to allocate.
1412 EHPersonality Personality =
1413 classifyEHPersonality(MF.getFunction()->getPersonalityFn());
1414 if (Personality == EHPersonality::CoreCLR) {
1415 // CLR funclets need to hold enough space to include the PSPSym, at the
1416 // same offset from the stack pointer (immediately after the prolog) as it
1417 // resides at in the main function.
1418 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
1420 // Other funclets just need enough stack for outgoing call arguments.
1421 UsedSize = MF.getFrameInfo()->getMaxCallFrameSize();
1423 // RBP is not included in the callee saved register block. After pushing RBP,
1424 // everything is 16 byte aligned. Everything we allocate before an outgoing
1425 // call must also be 16 byte aligned.
1426 unsigned FrameSizeMinusRBP =
1427 RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
1428 // Subtract out the size of the callee saved registers. This is how much stack
1429 // each funclet will allocate.
  return FrameSizeMinusRBP - CSSize;
}
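
// Worked example (illustrative): with 24 bytes of pushed CSRs and 32 bytes of
// maximum outgoing call arguments, 24 + 32 rounds up to 64, so the funclet
// itself allocates 64 - 24 = 40 bytes.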
1433 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1434 MachineBasicBlock &MBB) const {
1435 const MachineFrameInfo *MFI = MF.getFrameInfo();
1436 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1437 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1439 if (MBBI != MBB.end())
1440 DL = MBBI->getDebugLoc();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
1442 const bool Is64BitILP32 = STI.isTarget64BitILP32();
1443 unsigned FramePtr = TRI->getFrameRegister(MF);
1444 unsigned MachineFramePtr =
1445 Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
1448 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1450 IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
1451 bool IsFunclet = isFuncletReturnInstr(MBBI);
1452 MachineBasicBlock *TargetMBB = nullptr;
1454 // Get the number of bytes to allocate from the FrameInfo.
1455 uint64_t StackSize = MFI->getStackSize();
1456 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1457 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1458 uint64_t NumBytes = 0;
1460 if (MBBI->getOpcode() == X86::CATCHRET) {
1461 // SEH shouldn't use catchret.
1462 assert(!isAsynchronousEHPersonality(
1463 classifyEHPersonality(MF.getFunction()->getPersonalityFn())) &&
1464 "SEH should not use CATCHRET");
1466 NumBytes = getWinEHFuncletFrameSize(MF);
1467 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1468 TargetMBB = MBBI->getOperand(0).getMBB();
1471 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1473 .setMIFlag(MachineInstr::FrameDestroy);
1474 } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
1475 NumBytes = getWinEHFuncletFrameSize(MF);
1476 assert(hasFP(MF) && "EH funclets without FP not yet implemented");
1477 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1479 .setMIFlag(MachineInstr::FrameDestroy);
1480 } else if (hasFP(MF)) {
1481 // Calculate required stack adjustment.
1482 uint64_t FrameSize = StackSize - SlotSize;
1483 NumBytes = FrameSize - CSSize;
1485 // Callee-saved registers were pushed on stack before the stack was
1487 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1488 NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
1491 BuildMI(MBB, MBBI, DL,
1492 TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr)
1493 .setMIFlag(MachineInstr::FrameDestroy);
1495 NumBytes = StackSize - CSSize;
1497 uint64_t SEHStackAllocAmt = NumBytes;
1499 // Skip the callee-saved pop instructions.
1500 while (MBBI != MBB.begin()) {
1501 MachineBasicBlock::iterator PI = std::prev(MBBI);
1502 unsigned Opc = PI->getOpcode();
1504 if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1505 (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1506 Opc != X86::DBG_VALUE && !PI->isTerminator())
1511 MachineBasicBlock::iterator FirstCSPop = MBBI;
1514 // Fill EAX/RAX with the address of the target block.
1515 unsigned ReturnReg = STI.is64Bit() ? X86::RAX : X86::EAX;
1516 if (STI.is64Bit()) {
1517 // LEA64r TargetMBB(%rip), %rax
1518 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::LEA64r), ReturnReg)
1525 // MOV32ri $TargetMBB, %eax
1526 BuildMI(MBB, FirstCSPop, DL, TII.get(X86::MOV32ri), ReturnReg)
1529 // Record that we've taken the address of TargetMBB and no longer just
1530 // reference it in a terminator.
1531 TargetMBB->setHasAddressTaken();
1534 if (MBBI != MBB.end())
1535 DL = MBBI->getDebugLoc();
1537 // If there is an ADD32ri or SUB32ri of ESP immediately before this
1538 // instruction, merge the two instructions.
1539 if (NumBytes || MFI->hasVarSizedObjects())
1540 NumBytes += mergeSPUpdates(MBB, MBBI, true);
1542 // If dynamic alloca is used, then reset esp to point to the last callee-saved
1543 // slot before popping them off! Same applies for the case, when stack was
1544 // realigned. Don't do this if this was a funclet epilogue, since the funclets
1545 // will not do realignment or dynamic stack allocation.
1546 if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
1548 if (TRI->needsStackRealignment(MF))
1550 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
1551 uint64_t LEAAmount =
1552 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
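    // E.g. (illustrative): in a Win64 frame with SEHStackAllocAmt 200,
    // SEHFrameOffset is 128, so LEAAmount is 72 and we emit
    // "leaq 72(%rbp), %rsp"; in the non-Win64 case we instead return to the
    // first CSR slot with "leaq -CSSize(%rbp), %rsp".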
1554 // There are only two legal forms of epilogue:
1555 // - add SEHAllocationSize, %rsp
1556 // - lea SEHAllocationSize(%FramePtr), %rsp
1558 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
1559 // However, we may use this sequence if we have a frame pointer because the
1560 // effects of the prologue can safely be undone.
1561 if (LEAAmount != 0) {
1562 unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
1563 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
1564 FramePtr, false, LEAAmount);
1567 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
1568 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
1572 } else if (NumBytes) {
1573 // Adjust stack pointer back: ESP += numbytes.
1574 emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
1578 // Windows unwinder will not invoke function's exception handler if IP is
1579 // either in prologue or in epilogue. This behavior causes a problem when a
1580 // call immediately precedes an epilogue, because the return address points
1581 // into the epilogue. To cope with that, we insert an epilogue marker here,
1582 // then replace it with a 'nop' if it ends up immediately after a CALL in the
1583 // final emitted code.
1585 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
1587 // Add the return addr area delta back since we are not tail calling.
1588 int Offset = -1 * X86FI->getTCReturnAddrDelta();
1589 assert(Offset >= 0 && "TCDelta should never be positive");
1591 MBBI = MBB.getFirstTerminator();
1593 // Check for possible merge with preceding ADD instruction.
1594 Offset += mergeSPUpdates(MBB, MBBI, true);
1595 emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
1599 // NOTE: this only has a subset of the full frame index logic. In
1600 // particular, the FI < 0 and AfterFPPop logic is handled in
1601 // X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
1602 // (probably?) it should be moved into here.
1603 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1604 unsigned &FrameReg) const {
1605 const MachineFrameInfo *MFI = MF.getFrameInfo();
1607 // We can't calculate offset from frame pointer if the stack is realigned,
1608 // so enforce usage of stack/base pointer. The base pointer is used when we
1609 // have dynamic allocas in addition to dynamic realignment.
1610 if (TRI->hasBasePointer(MF))
1611 FrameReg = TRI->getBaseRegister();
1612 else if (TRI->needsStackRealignment(MF))
1613 FrameReg = TRI->getStackRegister();
1615 FrameReg = TRI->getFrameRegister(MF);
1617 // Offset will hold the offset from the stack pointer at function entry to the
1619 // We need to factor in additional offsets applied during the prologue to the
1620 // frame, base, and stack pointer depending on which is used.
1621 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1622 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1623 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1624 uint64_t StackSize = MFI->getStackSize();
1625 bool HasFP = hasFP(MF);
1626 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1627 int64_t FPDelta = 0;
1629 if (IsWin64Prologue) {
1630 assert(!MFI->hasCalls() || (StackSize % 16) == 8);
1632 // Calculate required stack adjustment.
1633 uint64_t FrameSize = StackSize - SlotSize;
1634 // If required, include space for extra hidden slot for stashing base pointer.
1635 if (X86FI->getRestoreBasePointer())
1636 FrameSize += SlotSize;
1637 uint64_t NumBytes = FrameSize - CSSize;
1639 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
1640 if (FI && FI == X86FI->getFAIndex())
1641 return -SEHFrameOffset;
1643 // FPDelta is the offset from the "traditional" FP location of the old base
1644 // pointer followed by return address and the location required by the
1645 // restricted Win64 prologue.
1646 // Add FPDelta to all offsets below that go through the frame pointer.
1647 FPDelta = FrameSize - SEHFrameOffset;
1648 assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
1649 "FPDelta isn't aligned per the Win64 ABI!");
1650 }
1653 if (TRI->hasBasePointer(MF)) {
1654 assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
1655 if (FI < 0) {
1656 // Skip the saved EBP.
1657 return Offset + SlotSize + FPDelta;
1658 } else {
1659 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1660 return Offset + StackSize;
1661 }
1662 } else if (TRI->needsStackRealignment(MF)) {
1663 if (FI < 0) {
1664 // Skip the saved EBP.
1665 return Offset + SlotSize + FPDelta;
1666 } else {
1667 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1668 return Offset + StackSize;
1669 }
1670 // FIXME: Support tail calls
1671 } else {
1672 if (!HasFP)
1673 return Offset + StackSize;
1675 // Skip the saved EBP.
1676 Offset += SlotSize;
1678 // Skip the RETADDR move area
1679 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1680 if (TailCallReturnAddrDelta < 0)
1681 Offset -= TailCallReturnAddrDelta;
1682 }
1684 return Offset + FPDelta;
1685 }
1687 // Simplified from getFrameIndexReference keeping only StackPointer cases
1688 int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
1689 int FI,
1690 unsigned &FrameReg) const {
1691 const MachineFrameInfo *MFI = MF.getFrameInfo();
1692 // Does not include any dynamic realign.
1693 const uint64_t StackSize = MFI->getStackSize();
1696 // LLVM arranges the stack as follows:
1701 // PUSH RBP <-- RBP points here
1703 // ~~~~~~~ <-- possible stack realignment (non-win64)
1706 // ... <-- RSP after prologue points here
1707 // ~~~~~~~ <-- possible stack realignment (win64)
1709 // if (hasVarSizedObjects()):
1710 // ... <-- "base pointer" (ESI/RBX) points here
1712 // ... <-- RSP points here
1714 // Case 1: In the simple case of no stack realignment and no dynamic
1715 // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
1716 // with fixed offsets from RSP.
1718 // Case 2: In the case of stack realignment with no dynamic allocas, fixed
1719 // stack objects are addressed with RBP and regular stack objects with RSP.
1721 // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
1722 // to address stack arguments for outgoing calls and nothing else. The "base
1723 // pointer" points to local variables, and RBP points to fixed objects.
1725 // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
1726 // answer we give is relative to the SP after the prologue, and not the
1727 // SP in the middle of the function.
1729 assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
1730 STI.isTargetWin64()) &&
1731 "offset from fixed object to SP is not static");
1733 // We don't handle tail calls, and shouldn't be seeing them either.
1734 int TailCallReturnAddrDelta =
1735 MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
1736 assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
1740 // Fill in FrameReg output argument.
1741 FrameReg = TRI->getStackRegister();
1743 // This is how the math works out:
1745 // %rsp grows (i.e. gets lower) left to right. Each box below is
1746 // one word (eight bytes). Obj0 is the stack slot we're trying to
1747 // get to.
1749 // ----------------------------------
1750 // | BP | Obj0 | Obj1 | ... | ObjN |
1751 // ----------------------------------
1752 // ^    ^      ^                   ^
1753 // A    B      C                   E
1755 // A is the incoming stack pointer.
1756 // (B - A) is the local area offset (-8 for x86-64) [1]
1757 // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
1759 // |(E - B)| is the StackSize (absolute value, positive). For a
1760 // stack that grows down, this works out to be (B - E). [3]
1762 // E is also the value of %rsp after stack has been set up, and we
1763 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
1764 // (C - E) == (C - A) - (B - A) + (B - E)
1765 // { Using [1], [2] and [3] above }
1766 // == getObjectOffset - LocalAreaOffset + StackSize
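//
// A worked example with made-up numbers (not taken from any real frame):
// suppose getObjectOffset(FI) returns -24, the local area offset is -8, and
// StackSize is 40. Then Offset = -24 - (-8) = -16, and the value returned is
// -16 + 40 = 24, i.e. Obj0 lives at [%rsp + 24] after the prologue.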
1769 // Get the Offset from the StackPointer
1770 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1772 return Offset + StackSize;
1773 }
1775 bool X86FrameLowering::assignCalleeSavedSpillSlots(
1776 MachineFunction &MF, const TargetRegisterInfo *TRI,
1777 std::vector<CalleeSavedInfo> &CSI) const {
1778 MachineFrameInfo *MFI = MF.getFrameInfo();
1779 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1781 unsigned CalleeSavedFrameSize = 0;
1782 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
1784 if (hasFP(MF)) {
1785 // emitPrologue always spills the frame register first.
1786 SpillSlotOffset -= SlotSize;
1787 MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1789 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
1790 // the frame register, we can delete it from CSI list and not have to worry
1791 // about avoiding it later.
1792 unsigned FPReg = TRI->getFrameRegister(MF);
1793 for (unsigned i = 0; i < CSI.size(); ++i) {
1794 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
1795 CSI.erase(CSI.begin() + i);
1796 break;
1797 }
1798 }
1799 }
1801 // Assign slots for GPRs. It increases frame size.
1802 for (unsigned i = CSI.size(); i != 0; --i) {
1803 unsigned Reg = CSI[i - 1].getReg();
1805 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1806 continue;
1808 SpillSlotOffset -= SlotSize;
1809 CalleeSavedFrameSize += SlotSize;
1811 int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1812 CSI[i - 1].setFrameIdx(SlotIndex);
1813 }
1815 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
1817 // Assign slots for XMMs.
1818 for (unsigned i = CSI.size(); i != 0; --i) {
1819 unsigned Reg = CSI[i - 1].getReg();
1820 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1821 continue;
1823 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
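// Illustration with hypothetical values (not produced by this function): if
// SpillSlotOffset is -40 and the register class needs a 16-byte slot with
// 16-byte alignment, the first adjustment below drops the offset to -48
// (aligned), and the second reserves the slot by moving it to -64.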
1824 // ensure alignment
1825 SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
1826 // spill into slot
1827 SpillSlotOffset -= RC->getSize();
1828 int SlotIndex =
1829 MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
1830 CSI[i - 1].setFrameIdx(SlotIndex);
1831 MFI->ensureMaxAlignment(RC->getAlignment());
1832 }
1834 return true;
1835 }
1837 bool X86FrameLowering::spillCalleeSavedRegisters(
1838 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1839 const std::vector<CalleeSavedInfo> &CSI,
1840 const TargetRegisterInfo *TRI) const {
1841 DebugLoc DL = MBB.findDebugLoc(MI);
1843 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
1844 // for us, and there are no XMM CSRs on Win32.
1845 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
1846 return true;
1848 // Push GPRs. It increases frame size.
1849 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1850 for (unsigned i = CSI.size(); i != 0; --i) {
1851 unsigned Reg = CSI[i - 1].getReg();
1853 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1854 continue;
1855 // Add the callee-saved register as live-in. It's killed at the spill.
1856 MBB.addLiveIn(Reg);
1858 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1859 .setMIFlag(MachineInstr::FrameSetup);
1860 }
1862 // Spill the XMM regs. X86 has no push/pop instructions for XMM registers,
1863 // so spill them to stack slots in the frame instead.
1864 for (unsigned i = CSI.size(); i != 0; --i) {
1865 unsigned Reg = CSI[i-1].getReg();
1866 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1867 continue;
1868 // Add the callee-saved register as live-in. It's killed at the spill.
1869 MBB.addLiveIn(Reg);
1870 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1872 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
1873 TRI);
1874 --MI;
1875 MI->setFlag(MachineInstr::FrameSetup);
1876 ++MI;
1877 }
1879 return true;
1880 }
1882 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1883 MachineBasicBlock::iterator MI,
1884 const std::vector<CalleeSavedInfo> &CSI,
1885 const TargetRegisterInfo *TRI) const {
1886 if (CSI.empty())
1887 return false;
1889 if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
1890 // Don't restore CSRs in 32-bit EH funclets. Matches
1891 // spillCalleeSavedRegisters.
1892 if (STI.is32Bit())
1893 return true;
1894 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
1895 // funclets. emitEpilogue transforms these to normal jumps.
1896 if (MI->getOpcode() == X86::CATCHRET) {
1897 const Function *Func = MBB.getParent()->getFunction();
1898 bool IsSEH = isAsynchronousEHPersonality(
1899 classifyEHPersonality(Func->getPersonalityFn()));
1900 if (IsSEH)
1901 return true;
1902 }
1903 }
1905 DebugLoc DL = MBB.findDebugLoc(MI);
1907 // Reload XMMs from stack frame.
1908 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1909 unsigned Reg = CSI[i].getReg();
1910 if (X86::GR64RegClass.contains(Reg) ||
1911 X86::GR32RegClass.contains(Reg))
1912 continue;
1914 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1915 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
1916 }
1918 // POP GPRs.
1919 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1920 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1921 unsigned Reg = CSI[i].getReg();
1922 if (!X86::GR64RegClass.contains(Reg) &&
1923 !X86::GR32RegClass.contains(Reg))
1924 continue;
1926 BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
1927 .setMIFlag(MachineInstr::FrameDestroy);
1928 }
1929 return true;
1930 }
1932 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
1933 BitVector &SavedRegs,
1934 RegScavenger *RS) const {
1935 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1937 MachineFrameInfo *MFI = MF.getFrameInfo();
1939 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1940 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1942 if (TailCallReturnAddrDelta < 0) {
1943 // create RETURNADDR area
1952 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1953 TailCallReturnAddrDelta - SlotSize, true);
1954 }
1956 // Spill the BasePtr if it's used.
1957 if (TRI->hasBasePointer(MF)) {
1958 SavedRegs.set(TRI->getBaseRegister());
1960 // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
1961 if (MF.getMMI().hasEHFunclets()) {
1962 int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
1963 X86FI->setHasSEHFramePtrSave(true);
1964 X86FI->setSEHFramePtrSaveIndex(FI);
1965 }
1966 }
1967 }
1969 static bool
1970 HasNestArgument(const MachineFunction *MF) {
1971 const Function *F = MF->getFunction();
1972 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1973 I != E; I++) {
1974 if (I->hasNestAttr())
1975 return true;
1976 }
1977 return false;
1978 }
1980 /// GetScratchRegister - Get a temp register for performing work in the
1981 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
1982 /// and the properties of the function either one or two registers will be
1983 /// needed. Set primary to true for the first register, false for the second.
1984 static unsigned
1985 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
1986 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1989 if (CallingConvention == CallingConv::HiPE) {
1990 if (Is64Bit)
1991 return Primary ? X86::R14 : X86::R13;
1992 else
1993 return Primary ? X86::EBX : X86::EDI;
1994 }
1996 if (Is64Bit) {
1997 if (IsLP64)
1998 return Primary ? X86::R11 : X86::R12;
1999 else
2000 return Primary ? X86::R11D : X86::R12D;
2001 }
2003 bool IsNested = HasNestArgument(&MF);
2005 if (CallingConvention == CallingConv::X86_FastCall ||
2006 CallingConvention == CallingConv::Fast) {
2007 if (IsNested)
2008 report_fatal_error("Segmented stacks do not support fastcall with "
2009 "nested functions.");
2010 return Primary ? X86::EAX : X86::ECX;
2011 }
2012 if (IsNested)
2013 return Primary ? X86::EDX : X86::EAX;
2014 return Primary ? X86::ECX : X86::EAX;
2015 }
2017 // The stack limit in the TCB is set to this many bytes above the actual stack
2018 // limit.
2019 static const uint64_t kSplitStackAvailable = 256;
2021 void X86FrameLowering::adjustForSegmentedStacks(
2022 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2023 MachineFrameInfo *MFI = MF.getFrameInfo();
2024 uint64_t StackSize;
2025 unsigned TlsReg, TlsOffset;
2026 DebugLoc DL;
2028 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2029 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2030 "Scratch register is live-in");
2032 if (MF.getFunction()->isVarArg())
2033 report_fatal_error("Segmented stacks do not support vararg functions.");
2034 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2035 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2036 !STI.isTargetDragonFly())
2037 report_fatal_error("Segmented stacks not supported on this platform.");
2039 // Eventually StackSize will be calculated by a link-time pass, which will
2040 // also decide whether checking code needs to be injected into this particular
2041 // prologue.
2042 StackSize = MFI->getStackSize();
2044 // Do not generate a prologue for functions with a stack of size zero
2045 if (StackSize == 0 && !MFI->adjustsStack())
2046 return;
2048 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2049 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2050 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2051 bool IsNested = false;
2053 // We need to know if the function has a nest argument only in 64 bit mode.
2054 if (Is64Bit)
2055 IsNested = HasNestArgument(&MF);
2057 // The MOV R10, RAX needs to be in a different block, since the RET we emit in
2058 // allocMBB needs to be the last (terminating) instruction.
2060 for (const auto &LI : PrologueMBB.liveins()) {
2061 allocMBB->addLiveIn(LI);
2062 checkMBB->addLiveIn(LI);
2063 }
2065 if (IsNested)
2066 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
2068 MF.push_front(allocMBB);
2069 MF.push_front(checkMBB);
2071 // When the frame size is less than 256 we just compare the stack
2072 // boundary directly to the value of the stack pointer, per gcc.
2073 bool CompareStackPointer = StackSize < kSplitStackAvailable;
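// For instance (hypothetical frame sizes): with StackSize = 96 (< 256) the
// stack pointer itself is compared against the stacklet limit below; with
// StackSize = 4096 a scratch register first receives SP - StackSize and that
// value is compared instead.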
2075 // Read the limit of the current stacklet from the stack_guard location.
2076 if (Is64Bit) {
2077 if (STI.isTargetLinux()) {
2078 TlsReg = X86::FS;
2079 TlsOffset = IsLP64 ? 0x70 : 0x40;
2080 } else if (STI.isTargetDarwin()) {
2081 TlsReg = X86::GS;
2082 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2083 } else if (STI.isTargetWin64()) {
2084 TlsReg = X86::GS;
2085 TlsOffset = 0x28; // pvArbitrary, reserved for application use
2086 } else if (STI.isTargetFreeBSD()) {
2087 TlsReg = X86::FS;
2088 TlsOffset = 0x18;
2089 } else if (STI.isTargetDragonFly()) {
2090 TlsReg = X86::FS;
2091 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2092 } else {
2093 report_fatal_error("Segmented stacks not supported on this platform.");
2094 }
2096 if (CompareStackPointer)
2097 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2098 else
2099 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2100 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2102 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
2103 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2104 } else {
2105 if (STI.isTargetLinux()) {
2106 TlsReg = X86::GS;
2107 TlsOffset = 0x30;
2108 } else if (STI.isTargetDarwin()) {
2109 TlsReg = X86::GS;
2110 TlsOffset = 0x48 + 90*4;
2111 } else if (STI.isTargetWin32()) {
2112 TlsReg = X86::FS;
2113 TlsOffset = 0x14; // pvArbitrary, reserved for application use
2114 } else if (STI.isTargetDragonFly()) {
2115 TlsReg = X86::FS;
2116 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
2117 } else if (STI.isTargetFreeBSD()) {
2118 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
2119 } else {
2120 report_fatal_error("Segmented stacks not supported on this platform.");
2121 }
2123 if (CompareStackPointer)
2124 ScratchReg = X86::ESP;
2125 else
2126 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
2127 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2129 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
2130 STI.isTargetDragonFly()) {
2131 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
2132 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2133 } else if (STI.isTargetDarwin()) {
2135 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
2136 unsigned ScratchReg2;
2137 bool SaveScratch2;
2138 if (CompareStackPointer) {
2139 // The primary scratch register is available for holding the TLS offset.
2140 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2141 SaveScratch2 = false;
2142 } else {
2143 // Need to use a second register to hold the TLS offset
2144 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
2146 // Unfortunately, with fastcc the second scratch register may hold an
2147 // argument.
2148 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
2149 }
2151 // If Scratch2 is live-in then it needs to be saved.
2152 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
2153 "Scratch register is live-in and not saved");
2155 if (SaveScratch2)
2156 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
2157 .addReg(ScratchReg2, RegState::Kill);
2159 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
2160 .addImm(TlsOffset);
2161 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
2162 .addReg(ScratchReg)
2163 .addReg(ScratchReg2).addImm(1).addReg(0)
2164 .addImm(0)
2165 .addReg(TlsReg);
2167 if (SaveScratch2)
2168 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
2169 }
2170 }
2172 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
2173 // It jumps to normal execution of the function body.
2174 BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);
2176 // On 32 bit we first push the arguments size and then the frame size. On 64
2177 // bit, we pass the stack frame size in r10 and the argument size in r11.
2178 if (Is64Bit) {
2179 // Functions with nested arguments use R10, so it needs to be saved across
2180 // the call to _morestack
2182 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
2183 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
2184 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
2185 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
2186 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
2188 if (IsNested)
2189 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2191 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2192 .addImm(StackSize);
2193 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2194 .addImm(X86FI->getArgumentStackSize());
2195 } else {
2196 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2197 .addImm(X86FI->getArgumentStackSize());
2198 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2199 .addImm(StackSize);
2200 }
2202 // __morestack is in libgcc
2203 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2204 // Under the large code model, we cannot assume that __morestack lives
2205 // within 2^31 bytes of the call site, so we cannot use pc-relative
2206 // addressing. We cannot perform the call via a temporary register,
2207 // as the rax register may be used to store the static chain, and all
2208 // other suitable registers may be either callee-save or used for
2209 // parameter passing. We cannot use the stack at this point either
2210 // because __morestack manipulates the stack directly.
2212 // To avoid these issues, perform an indirect call via a read-only memory
2213 // location containing the address.
2215 // This solution is not perfect, as it assumes that the .rodata section
2216 // is laid out within 2^31 bytes of each function body, but this seems
2217 // to be sufficient for JIT.
2218 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2222 .addExternalSymbol("__morestack_addr")
2224 MF.getMMI().setUsesMorestackAddr(true);
2227 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2228 .addExternalSymbol("__morestack");
2230 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2231 .addExternalSymbol("__morestack");
2234 if (IsNested)
2235 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2236 else
2237 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2239 allocMBB->addSuccessor(&PrologueMBB);
2241 checkMBB->addSuccessor(allocMBB);
2242 checkMBB->addSuccessor(&PrologueMBB);
2244 #ifdef XDEBUG
2245 MF.verify();
2246 #endif
2247 }
2249 /// Erlang programs may need a special prologue to handle the stack size they
2250 /// might need at runtime. That is because Erlang/OTP does not implement a C
2251 /// stack but uses a custom implementation of a hybrid stack/heap architecture.
2252 /// (for more information see Eric Stenman's Ph.D. thesis:
2253 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
2255 /// CheckStack:
2256 /// temp0 = sp - MaxStack
2257 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
2258 /// OldStart:
2259 /// ...
2260 /// IncStack:
2261 /// call inc_stack # doubles the stack space
2262 /// temp0 = sp - MaxStack
2263 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
2264 void X86FrameLowering::adjustForHiPEPrologue(
2265 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2266 MachineFrameInfo *MFI = MF.getFrameInfo();
2267 DebugLoc DL;
2268 // HiPE-specific values
2269 const unsigned HipeLeafWords = 24;
2270 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
2271 const unsigned Guaranteed = HipeLeafWords * SlotSize;
2272 unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
2273 MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
2274 unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
2276 assert(STI.isTargetLinux() &&
2277 "HiPE prologue is only supported on Linux operating systems.");
2279 // Compute the largest caller's frame that is needed to fit the callees'
2280 // frames. This 'MaxStack' is computed from:
2282 // a) the fixed frame size, which is the space needed for all spilled temps,
2283 // b) outgoing on-stack parameter areas, and
2284 // c) the minimum stack space this function needs to make available for the
2285 // functions it calls (a tunable ABI property).
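// A purely illustrative calculation (numbers invented, not measured): with
// SlotSize = 8, a 40-byte local frame, and a caller taking 8 arguments on
// x86-64 (CCRegisteredArgs = 6), CallerStkArity is 2, so before the loop
// below MaxStack = 40 + 2*8 + 8 = 64; calls to non-BIF callees can then
// raise it further via MoreStackForCalls.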
2286 if (MFI->hasCalls()) {
2287 unsigned MoreStackForCalls = 0;
2289 for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
2290 MBBI != MBBE; ++MBBI)
2291 for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
2292 MI != ME; ++MI) {
2294 if (!MI->isCall())
2295 continue;
2296 // Get callee operand.
2297 const MachineOperand &MO = MI->getOperand(0);
2299 // Only take account of global function calls (no closures etc.).
2300 if (!MO.isGlobal())
2301 continue;
2303 const Function *F = dyn_cast<Function>(MO.getGlobal());
2304 if (!F)
2305 continue;
2307 // Do not update 'MaxStack' for primitive and built-in functions
2308 // (encoded with names either starting with "erlang."/"bif_" or not
2309 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
2310 // "_", such as the BIF "suspend_0") as they are executed on another
2311 // stack.
2312 if (F->getName().find("erlang.") != StringRef::npos ||
2313 F->getName().find("bif_") != StringRef::npos ||
2314 F->getName().find_first_of("._") == StringRef::npos)
2315 continue;
2317 unsigned CalleeStkArity =
2318 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
2319 if (HipeLeafWords - 1 > CalleeStkArity)
2320 MoreStackForCalls = std::max(MoreStackForCalls,
2321 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
2322 }
2323 MaxStack += MoreStackForCalls;
2324 }
2326 // If the stack frame needed is larger than the guaranteed then runtime checks
2327 // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue.
2328 if (MaxStack > Guaranteed) {
2329 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
2330 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
2332 for (const auto &LI : PrologueMBB.liveins()) {
2333 stackCheckMBB->addLiveIn(LI);
2334 incStackMBB->addLiveIn(LI);
2335 }
2337 MF.push_front(incStackMBB);
2338 MF.push_front(stackCheckMBB);
2340 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
2341 unsigned LEAop, CMPop, CALLop;
2342 if (Is64Bit) {
2343 SPReg = X86::RSP;
2344 PReg = X86::RBP;
2345 LEAop = X86::LEA64r;
2346 CMPop = X86::CMP64rm;
2347 CALLop = X86::CALL64pcrel32;
2348 SPLimitOffset = 0x90;
2349 } else {
2350 SPReg = X86::ESP;
2351 PReg = X86::EBP;
2352 LEAop = X86::LEA32r;
2353 CMPop = X86::CMP32rm;
2354 CALLop = X86::CALLpcrel32;
2355 SPLimitOffset = 0x4c;
2356 }
2358 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2359 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2360 "HiPE prologue scratch register is live-in");
2362 // Create new MBB for StackCheck:
2363 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
2364 SPReg, false, -MaxStack);
2365 // SPLimitOffset is in a fixed heap location (pointed by BP).
2366 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
2367 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2368 BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);
2370 // Create new MBB for IncStack:
2371 BuildMI(incStackMBB, DL, TII.get(CALLop)).
2372 addExternalSymbol("inc_stack_0");
2373 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
2374 SPReg, false, -MaxStack);
2375 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
2376 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2377 BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
2379 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
2380 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
2381 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
2382 incStackMBB->addSuccessor(incStackMBB, {1, 100});
2383 }
2385 #ifdef XDEBUG
2386 MF.verify();
2387 #endif
2388 }
2389 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
2390 MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
2392 if (Offset <= 0)
2393 return false;
2395 if (Offset % SlotSize)
2396 return false;
2398 int NumPops = Offset / SlotSize;
2399 // This is only worth it if we have at most 2 pops.
2400 if (NumPops != 1 && NumPops != 2)
2401 return false;
2403 // Handle only the trivial case where the adjustment directly follows
2404 // a call. This is the most common one, anyway.
2405 if (MBBI == MBB.begin())
2406 return false;
2407 MachineBasicBlock::iterator Prev = std::prev(MBBI);
2408 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
2409 return false;
2411 unsigned Regs[2];
2412 unsigned FoundRegs = 0;
2414 auto RegMask = Prev->getOperand(1);
2416 auto &RegClass =
2417 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
2418 // Try to find up to NumPops free registers.
2419 for (auto Candidate : RegClass) {
2421 // Poor man's liveness:
2422 // Since we're immediately after a call, any register that is clobbered
2423 // by the call and not defined by it can be considered dead.
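// Illustrative note (assumes the SysV x86-64 convention): a caller-saved
// register that the call clobbers per its regmask, and does not define as a
// return value, is dead at this point and can absorb one of the POPs emitted
// below; RCX is a typical candidate.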
2424 if (!RegMask.clobbersPhysReg(Candidate))
2425 continue;
2427 bool IsDef = false;
2428 for (const MachineOperand &MO : Prev->implicit_operands()) {
2429 if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
2430 IsDef = true;
2431 break;
2432 }
2433 }
2435 if (IsDef)
2436 continue;
2438 Regs[FoundRegs++] = Candidate;
2439 if (FoundRegs == (unsigned)NumPops)
2440 break;
2441 }
2443 if (FoundRegs == 0)
2444 return false;
2446 // If we found only one free register, but need two, reuse the same one twice.
2447 while (FoundRegs < (unsigned)NumPops)
2448 Regs[FoundRegs++] = Regs[0];
2450 for (int i = 0; i < NumPops; ++i)
2451 BuildMI(MBB, MBBI, DL,
2452 TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
2454 return true;
2455 }
2457 void X86FrameLowering::
2458 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2459 MachineBasicBlock::iterator I) const {
2460 bool reserveCallFrame = hasReservedCallFrame(MF);
2461 unsigned Opcode = I->getOpcode();
2462 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
2463 DebugLoc DL = I->getDebugLoc();
2464 uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
2465 uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
2466 I = MBB.erase(I);
2468 if (!reserveCallFrame) {
2469 // If the stack pointer can be changed after prologue, turn the
2470 // adjcallstackup instruction into a 'sub ESP, <amt>' and the
2471 // adjcallstackdown instruction into 'add ESP, <amt>'
2473 // We need to keep the stack aligned properly. To do this, we round the
2474 // amount of space needed for the outgoing arguments up to the next
2475 // alignment boundary.
2476 unsigned StackAlign = getStackAlignment();
2477 Amount = RoundUpToAlignment(Amount, StackAlign);
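// For example (hypothetical values): Amount = 20 with a 16-byte stack
// alignment rounds up to 32, keeping the stack aligned across the call.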
2479 MachineModuleInfo &MMI = MF.getMMI();
2480 const Function *Fn = MF.getFunction();
2481 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2482 bool DwarfCFI = !WindowsCFI &&
2483 (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
2485 // If we have any exception handlers in this function, and we adjust
2486 // the SP before calls, we may need to indicate this to the unwinder
2487 // using GNU_ARGS_SIZE. Note that this may be necessary even when
2488 // Amount == 0, because the preceding function may have set a non-0
2489 // GNU_ARGS_SIZE.
2490 // TODO: We don't need to reset this between subsequent functions,
2491 // if it didn't change.
2492 bool HasDwarfEHHandlers = !WindowsCFI &&
2493 !MF.getMMI().getLandingPads().empty();
2495 if (HasDwarfEHHandlers && !isDestroy &&
2496 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
2497 BuildCFI(MBB, I, DL,
2498 MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
2500 if (Amount == 0)
2501 return;
2503 // Factor out the amount that gets handled inside the sequence
2504 // (Pushes of argument for frame setup, callee pops for frame destroy)
2505 Amount -= InternalAmt;
2507 // If this is a callee-pop calling convention, and we're emitting precise
2508 // SP-based CFI, emit a CFA adjust for the amount the callee popped.
2509 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF) &&
2510 MMI.usePreciseUnwindInfo())
2511 BuildCFI(MBB, I, DL,
2512 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
2515 // Add Amount to SP to destroy a frame, and subtract to setup.
2516 int Offset = isDestroy ? Amount : -Amount;
2518 if (!(Fn->optForMinSize() &&
2519 adjustStackWithPops(MBB, I, DL, Offset)))
2520 BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
2523 if (DwarfCFI && !hasFP(MF)) {
2524 // If we don't have FP, but need to generate unwind information,
2525 // we need to set the correct CFA offset after the stack adjustment.
2526 // How much we adjust the CFA offset depends on whether we're emitting
2527 // CFI only for EH purposes or for debugging. EH only requires the CFA
2528 // offset to be correct at each call site, while for debugging we want
2529 // it to be more precise.
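// As a hypothetical illustration: when destroying a call frame with Amount =
// 12 remaining after subtracting InternalAmt = 4 (callee-popped bytes),
// precise unwind info gets a -12 adjustment here (the -4 was already emitted
// above), while EH-only CFI folds both into a single -16 adjustment.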
2530 int CFAOffset = Amount;
2531 if (!MMI.usePreciseUnwindInfo())
2532 CFAOffset += InternalAmt;
2533 CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
2534 BuildCFI(MBB, I, DL,
2535 MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
2536 }
2538 return;
2539 }
2541 if (isDestroy && InternalAmt) {
2542 // If we are performing frame pointer elimination and if the callee pops
2543 // something off the stack pointer, add it back. We do this until we have
2544 // more advanced stack pointer tracking ability.
2545 // We are not tracking the stack pointer adjustment by the callee, so make
2546 // sure we restore the stack pointer immediately after the call, there may
2547 // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
2548 MachineBasicBlock::iterator B = MBB.begin();
2549 while (I != B && !std::prev(I)->isCall())
2550 --I;
2551 BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
2552 }
2553 }
2555 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2556 assert(MBB.getParent() && "Block is not attached to a function!");
2558 // Win64 has strict requirements on the epilogue, and we are not taking a
2559 // chance at messing with them.
2560 // I.e., unless this block is already an exit block, we can't use
2561 // it as an epilogue.
2562 if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
2563 return false;
2565 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
2566 return true;
2568 // If we cannot use LEA to adjust SP, we may need to use ADD, which
2569 // clobbers the EFLAGS. Check that none of the terminators reads the
2570 // EFLAGS, and if one uses it, conservatively assume this is not
2571 // safe to insert the epilogue here.
2572 return !terminatorsNeedFlagsAsInput(MBB);
2573 }
2575 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
2576 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
2577 DebugLoc DL, bool RestoreSP) const {
2578 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
2579 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
2580 assert(STI.is32Bit() && !Uses64BitFramePtr &&
2581 "restoring EBP/ESI on non-32-bit target");
2583 MachineFunction &MF = *MBB.getParent();
2584 unsigned FramePtr = TRI->getFrameRegister(MF);
2585 unsigned BasePtr = TRI->getBaseRegister();
2586 WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
2587 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2588 MachineFrameInfo *MFI = MF.getFrameInfo();
2590 // FIXME: Don't set FrameSetup flag in catchret case.
2592 int FI = FuncInfo.EHRegNodeFrameIndex;
2593 int EHRegSize = MFI->getObjectSize(FI);
2595 if (RestoreSP) {
2596 // MOV32rm -EHRegSize(%ebp), %esp
2597 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
2598 X86::EBP, true, -EHRegSize)
2599 .setMIFlag(MachineInstr::FrameSetup);
2600 }
2602 unsigned UsedReg;
2603 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
2604 int EndOffset = -EHRegOffset - EHRegSize;
2605 FuncInfo.EHRegNodeEndOffset = EndOffset;
2607 if (UsedReg == FramePtr) {
2608 // ADD $offset, %ebp
2609 unsigned ADDri = getADDriOpcode(false, EndOffset);
2610 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
2611 .addReg(FramePtr)
2612 .addImm(EndOffset)
2613 .setMIFlag(MachineInstr::FrameSetup)
2614 ->getOperand(3)
2615 .setIsDead();
2616 assert(EndOffset >= 0 &&
2617 "end of registration object above normal EBP position!");
2618 } else if (UsedReg == BasePtr) {
2619 // LEA offset(%ebp), %esi
2620 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
2621 FramePtr, false, EndOffset)
2622 .setMIFlag(MachineInstr::FrameSetup);
2623 // MOV32rm SavedEBPOffset(%esi), %ebp
2624 assert(X86FI->getHasSEHFramePtrSave());
2625 int Offset =
2626 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
2627 assert(UsedReg == BasePtr);
2628 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
2629 UsedReg, true, Offset)
2630 .setMIFlag(MachineInstr::FrameSetup);
2632 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
2637 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
2638 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
2639 unsigned Offset = 16;
2640 // RBP is immediately pushed.
2641 Offset += SlotSize;
2642 // All callee-saved registers are then pushed.
2643 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
2644 // Every funclet allocates enough stack space for the largest outgoing call.
2645 Offset += getWinEHFuncletFrameSize(MF);
2646 return Offset;
2647 }
2649 void X86FrameLowering::processFunctionBeforeFrameFinalized(
2650 MachineFunction &MF, RegScavenger *RS) const {
2651 // If this function isn't doing Win64-style C++ EH, we don't need to do
2652 // anything.
2653 const Function *Fn = MF.getFunction();
2654 if (!STI.is64Bit() || !MF.getMMI().hasEHFunclets() ||
2655 classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
2656 return;
2658 // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
2659 // relative to RSP after the prologue. Find the offset of the last fixed
2660 // object, so that we can allocate a slot immediately following it. If there
2661 // were no fixed objects, use offset -SlotSize, which is immediately after the
2662 // return address. Fixed objects have negative frame indices.
2663 MachineFrameInfo *MFI = MF.getFrameInfo();
2664 int64_t MinFixedObjOffset = -SlotSize;
2665 for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
2666 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
2668 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
2669 int UnwindHelpFI =
2670 MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
2671 MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
2673 // Store -2 into UnwindHelp on function entry. We have to scan forwards past
2674 // other frame setup instructions.
2675 MachineBasicBlock &MBB = MF.front();
2676 auto MBBI = MBB.begin();
2677 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
2678 ++MBBI;
2680 DebugLoc DL = MBB.findDebugLoc(MBBI);
2681 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
2682 UnwindHelpFI)
2683 .addImm(-2);
2684 }