//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.  This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (isInt<8>(Imm))
    return IsLP64 ? X86::SUB64ri8 : X86::SUB32ri8;
  return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (isInt<8>(Imm))
    return IsLP64 ? X86::ADD64ri8 : X86::ADD32ri8;
  return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
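/// For example, before a 64-bit RET that uses no GPR operands this returns
/// RAX, the first candidate in CallerSavedRegs64Bit below; if the return
/// uses a register (even implicitly), the next unused candidate is chosen.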
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
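// For example, NumBytes == -40 with a 64-bit stack pointer is emitted as a
// single "subq $40, %rsp" (or "leaq -40(%rsp), %rsp" when UseLEA is set),
// while an adjustment of exactly one slot may instead become a push or pop
// of a dead caller-saved register.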
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(Is64BitStackPtr);
  else
    Opc = isSub
          ? getSUBriOpcode(Is64BitStackPtr, Offset)
          : getADDriOpcode(Is64BitStackPtr, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64BitTarget ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
      if (Reg) {
        unsigned Opc = isSub
          ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)
          : (Is64BitTarget ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = nullptr;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
/// mergeSPUpdatesUp - Merge a stack adjustment immediately preceding MBBI
/// into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
/// mergeSPUpdatesDown - Merge a stack adjustment immediately following MBBI
/// into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  // FIXME:  THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = std::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative for SUB.
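/// For example, if the instruction preceding MBBI is "subl $16, %esp" and
/// doMergeWithPrevious is true, that SUB is erased and -16 is returned.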
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}
void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            DebugLoc DL) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex =
        MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
                                                        Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}
/// usesTheStack - This function checks whether any user of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
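/// (copyPhysReg lowers an EFLAGS copy with PUSHF/POP-style sequences, which
/// read and write memory at the stack pointer.)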
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
       ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push  %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
         .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
              ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  bool IsWin64 = STI.isTargetWin64();
  bool IsWinEH =
      MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() ==
      ExceptionHandling::WinEH; // Not necessarily synonymous with IsWin64.
  bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
  bool NeedsDwarfCFI =
      !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
                 getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;
  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
  // If this is x86-64, the Red Zone is not disabled, and this is a leaf
  // function using at most 128 bytes of stack space with no frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone). We also check that we don't push and
  // pop from the stack.
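  // For example, a leaf function needing 200 bytes of locals and spilling no
  // callee-saved registers ends up with StackSize = max(0, 200 - 128) = 72
  // here, since the first 128 bytes live in the red zone.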
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
      !MFI->adjustsStack() &&                           // No calls.
      !IsWin64 &&                                       // Win64 has no Red Zone
      !usesTheStack(MF) &&                              // Don't push and pop.
      !MF.shouldSplitStack()) {                         // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }
  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
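  // For example, a delta of -8 emits "sub $8, %rsp" below, reserving one
  // extra slot so the return address can later be moved into it.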
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended
  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(MachineFramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr,
                                         DwarfFramePtr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
    if (NeedsWinEH) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);
    if (NeedsDwarfCFI) {
      // Mark effective beginning of when frame pointer becomes valid.
      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Mark the FramePtr as live-in in every block.
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
      I->addLiveIn(MachineFramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }
  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    unsigned Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
      StackOffset += stackGrowth;
    }

    if (NeedsWinEH) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
          MachineInstr::FrameSetup);
    }
  }
  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(Uses64BitFramePtr ? X86::AND64ri32 : X86::AND32ri), StackPtr)
      .addReg(StackPtr)
      .addImm(-MaxAlign)
      .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
  // If there is an SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.
  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
  // responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
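  // Roughly, allocating 16384 bytes on 64-bit MSVC targets emits:
  //   movabsq $16384, %rax; callq __chkstk; subq %rax, %rsp
  // (see the W64ALLOCA / SUB64rr sequence built below).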
  if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetMacho()) {
    const char *StackProbeSymbol;

    if (Is64Bit) {
      if (STI.isTargetCygMing()) {
        StackProbeSymbol = "___chkstk_ms";
      } else {
        StackProbeSymbol = "__chkstk";
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not livein for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    if (Is64Bit) {
      // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
      // themselves. They do not clobber %rax, so we can reuse it when
      // adjusting %rsp.
      BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
        .addReg(StackPtr)
        .addReg(X86::RAX)
        .setMIFlag(MachineInstr::FrameSetup);
    }
    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, Uses64BitFramePtr,
                 UseLEA, TII, *RegInfo);
  }
  int SEHFrameOffset = 0;
  if (IsWinEH) {
    if (HasFP) {
      // We need to set frame base offset low enough such that all saved
      // register offsets would be positive relative to it, but we can't
      // just use NumBytes, because .seh_setframe offset must be <= 240.
      // So we pretend to have only allocated enough space to spill the
      // non-volatile registers.
      // We don't care about the rest of stack allocation, because the
      // unwinder will restore SP to (BP - SEHFrameOffset).
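      // For example, XMM spill slots at offsets -16 and -32 from the frame
      // pointer give SEHFrameOffset = 32 (already 16-byte aligned), and
      // .seh_setframe then uses 32 rather than NumBytes.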
      for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
        int offset = MFI->getObjectOffset(Info.getFrameIdx());
        SEHFrameOffset = std::max(SEHFrameOffset, abs(offset));
      }
      SEHFrameOffset += SEHFrameOffset % 16; // ensure alignment

      // This only needs to account for XMM spill slots, GPR slots
      // are covered by the .seh_pushreg's emitted above.
      unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
      if (Size) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
            .addImm(Size)
            .setMIFlag(MachineInstr::FrameSetup);
      }

      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // SP will be the base register for restoring XMMs
      if (NumBytes) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }

  // Skip the rest of register spilling code
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  // Emit SEH info for non-GPRs
  if (NeedsWinEH) {
    for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
      unsigned Reg = Info.getReg();
      if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
        continue;
      assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");

      int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
      Offset += SEHFrameOffset;

      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
          .addImm(Reg)
          .addImm(Offset)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);
  }
  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
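  // (The base pointer register is RBX on 64-bit targets and ESI on 32-bit
  // ones; see "mov %rsp, %rbx" in the prologue gist above.)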
  if (RegInfo->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
  }
  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr,
                                               -StackSize + stackGrowth));

      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
  }
}
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned MachineFramePtr = Is64BitILP32 ?
             getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
  unsigned StackPtr = RegInfo->getStackRegister();

  bool IsWinEH =
      MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() ==
      ExceptionHandling::WinEH;
  bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RETQ:
  case X86::RETL:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }
  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out.  Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }
  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }
  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();
  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr, UseLEA,
                 TII, *RegInfo);
    --MBBI;
  }
  // Windows unwinder will not invoke function's exception handler if IP is
  // either in prologue or in epilogue.  This behavior causes a problem when a
  // call immediately precedes an epilogue, because the return address points
  // into the epilogue.  To cope with that, we insert an epilogue marker here,
  // then replace it with a 'nop' if it ends up immediately after a CALL in the
  // final emitted code.
  if (NeedsWinEH)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
                   UseLEA, TII, *RegInfo);
    }
    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
              RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr, UseLEA, TII,
                 *RegInfo);
  }
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer.  The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}
bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  unsigned CalleeSavedFrameSize = 0;
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  if (hasFP(MF)) {
    // emitPrologue always spills the frame register first.
    SpillSlotOffset -= SlotSize;
    MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // Since emitPrologue and emitEpilogue will handle spilling and restoring of
    // the frame register, we can delete it from the CSI list and not have to
    // worry about avoiding it later.
    unsigned FPReg = RegInfo->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases frame size.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
    // ensure alignment
    SpillSlotOffset -= abs(SpillSlotOffset) % RC->getAlignment();
    // spill into slot
    SpillSlotOffset -= RC->getSize();
    int SlotIndex =
        MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI->ensureMaxAlignment(RC->getAlignment());
  }

  return true;
}
bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);

    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // Spill XMM regs to the stack frame; X86 has no push / pop instructions
  // for XMM registers.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}
bool X86FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}
void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}
static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}
/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit)
    return Primary ? X86::R11 : X86::R12;

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;
void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  uint64_t StackSize;
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
      !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
    report_fatal_error("Segmented stacks not supported on this platform.");
  // Eventually StackSize will be calculated by a link-time pass; which will
  // also decide whether checking code needs to be injected into this particular
  // prologue.
  StackSize = MFI->getStackSize();

  // Do not generate a prologue for functions with a stack of size zero
  if (StackSize == 0)
    return;

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
         e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);
  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
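  // For example, on 64-bit Linux with StackSize < 256 the check below is
  // simply "cmpq %fs:0x70, %rsp" followed by the conditional jump.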
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = 0x70;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }
  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const unsigned SlotSize =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())
          ->getSlotSize();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  const bool Is64Bit = STI.is64Bit();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");
  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }
  // If the stack frame needed is larger than the guaranteed, then runtime
  // checks and calls to the "inc_stack_0" BIF should be inserted in the
  // assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock &prologueMBB = MF.front();
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
           E = prologueMBB.livein_end(); I != E; I++) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&prologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&prologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
                                       MF.getSubtarget().getRegisterInfo());
  unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);
  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
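    // For example, with a 16-byte stack alignment, a 20-byte outgoing
    // argument area is rounded up to 32 bytes.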
    unsigned StackAlign = MF.getTarget()
                              .getSubtargetImpl()
                              ->getFrameLowering()
                              ->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = nullptr;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(IsLP64, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }
  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call, there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}