//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}

static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

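// Note: subtargets that set X86Subtarget::useLeaForSP() (the UseLEA flags
// threaded through the helpers below) adjust the stack pointer with LEA
// instead of ADD/SUB, because LEA does not clobber EFLAGS.
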
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worry about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, bool IsLP64, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(IsLP64);
  else
    Opc = isSub
      ? getSUBriOpcode(IsLP64, Offset)
      : getADDriOpcode(IsLP64, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        unsigned Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r  : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = nullptr;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}

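// Note: emitSPUpdate caps each ADD/SUB at 2^31 - 1 bytes because the
// immediate forms take signed 32-bit operands. A slot-sized adjustment is
// emitted as a push or pop instead when a suitable register is available:
// it encodes smaller and leaves EFLAGS untouched.
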
/// mergeSPUpdatesUp - Merge a stack adjustment found immediately above the
/// passed iterator into *NumBytes, deleting the merged instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack adjustment found immediately below the
/// passed iterator into *NumBytes, deleting the merged instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = std::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

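// Note: callers fold the returned adjustment into their own bookkeeping; for
// example, emitPrologue does "NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr,
// true)" so a merged SUB is re-emitted as part of the main allocation.
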
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    unsigned FramePtr) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  bool HasFP = hasFP(MF);

  // Calculate amount of bytes used for return address storing.
  int stackGrowth = -RegInfo->getSlotSize();

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized sometime.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // used again in the course of the prologue.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //      ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex =
        MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
                                                        Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

/// usesTheStack - This function checks if any of the users of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
       ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool IsWin64 = STI.isTargetWin64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&       // No dynamic alloca.
      !MFI->adjustsStack() &&             // No calls.
      !IsWin64 &&                         // Win64 has no Red Zone
      !usesTheStack(MF) &&                // Don't push and pop.
      !MF.shouldSplitStack()) {           // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

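  // For example, a 64-bit leaf function with 40 bytes of locals and no
  // callee-saved spills reaches here with StackSize == 40, which the std::max
  // above reduces to 0: its locals live entirely in the 128-byte red zone
  // below RSP and no stack adjustment is emitted.
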
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(IsLP64, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr,
                                         DwarfFramePtr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
      StackOffset += stackGrowth;
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).

  // NOTE: We push the registers before realigning the stack, so
  // vector callee-saved (xmm) registers may be saved w/o proper
  // alignment in this way. However, currently these regs are saved in
  // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
  // this shouldn't be a problem.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
      .addReg(StackPtr)
      .addImm(-MaxAlign)
      .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }

  // If there is an SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.
  //
  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetMacho()) {
    const char *StackProbeSymbol;

    if (Is64Bit) {
      if (STI.isTargetCygMing()) {
        StackProbeSymbol = "___chkstk_ms";
      } else {
        StackProbeSymbol = "__chkstk";
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not livein for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    if (Is64Bit) {
      // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
      // themselves. They also do not clobber %rax, so we can reuse it when
      // adjusting %rsp.
      BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
        .addReg(StackPtr)
        .addReg(X86::RAX)
        .setMIFlag(MachineInstr::FrameSetup);
    }
    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
                 UseLEA, TII, *RegInfo);

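  // Note: on the Windows paths above the allocation is routed through a stack
  // probe (__chkstk / ___chkstk_ms / _alloca) so guard pages are touched in
  // order; on every other target a single SP adjustment is sufficient.
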
  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (((!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr,
                                               -StackSize + stackGrowth));

      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL, HasFP ? FramePtr : StackPtr);
  }
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RETQ:
  case X86::RETL:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break; // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(IsLP64);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
    } else {
      unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, IsLP64, UseLEA,
                 TII, *RegInfo);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, IsLP64,
                   UseLEA, TII, *RegInfo);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
              RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, IsLP64, UseLEA, TII,
                 *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}

bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                 MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM registers. X86 has no push/pop instructions for XMM registers,
  // so they are stored to their stack slots instead.
  // Note that only the Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}

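// Note: the GPR loop in spillCalleeSavedRegisters walks CSI in reverse, so
// the pushes occur in the opposite order of the pops emitted by
// restoreCalleeSavedRegisters below, which walks CSI forward.
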
bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }

  return true;
}

void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    (void)FrameIdx;
  }

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit)
    return Primary ? X86::R11 : X86::R12;

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  uint64_t StackSize;
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
      !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // Do not generate a prologue for functions with a stack of size zero.
  if (StackSize == 0)
    return;

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
         e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet off the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = 0x70;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack.

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}

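// Note on block layout: checkMBB is pushed to the front after allocMBB, so it
// becomes the new function entry; it falls through into allocMBB when the
// stacklet-limit check fails, and both blocks branch back to the original
// entry block (prologueMBB) to resume normal execution.
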
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const unsigned SlotSize =
      static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo())
          ->getSlotSize();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  const bool Is64Bit = STI.is64Bit();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
    MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed amount, then
  // runtime checks and calls to the "inc_stack_0" BIF should be inserted in
  // the assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock &prologueMBB = MF.front();
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
           E = prologueMBB.livein_end(); I != E; I++) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&prologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&prologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}

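// Note: the 99/1 successor weights in adjustForHiPEPrologue mark the
// inc_stack_0 path as cold relative to falling straight through into the
// function body.
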
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  const X86RegisterInfo &RegInfo =
    *static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
  unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign =
      MF.getTarget().getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = nullptr;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(IsLP64, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call, there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}

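// Note: the backward walk at the end of eliminateCallFramePseudoInstr places
// the compensating SUB immediately after the CALL rather than at the
// ADJCALLSTACKUP site, since spill code between the two must already see the
// restored stack pointer.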