diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 0dc63ef9a61..3228b141eb6 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -30,17 +30,23 @@
 #include "llvm/CodeGen/MachineLocation.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
 #include "llvm/Target/TargetFrameInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/CommandLine.h"
 using namespace llvm;
 
+cl::opt<bool>
+ForceStackAlign("force-align-stack",
+                 cl::desc("Force align the stack to the minimum alignment"
+                          " needed for the function."),
+                 cl::init(false), cl::Hidden);
+
 X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                  const TargetInstrInfo &tii)
   : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
@@ -55,6 +61,7 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
   Is64Bit = Subtarget->is64Bit();
   IsWin64 = Subtarget->isTargetWin64();
   StackAlign = TM.getFrameInfo()->getStackAlignment();
+
   if (Is64Bit) {
     SlotSize = 8;
     StackPtr = X86::RSP;
@@ -66,12 +73,12 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
   }
 }
 
-// getDwarfRegNum - This function maps LLVM register identifiers to the
-// Dwarf specific numbering, used in debug info and exception tables.
-
+/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
+/// specific numbering, used in debug info and exception tables.
 int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
   const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
   unsigned Flavour = DWARFFlavour::X86_64;
+
   if (!Subtarget->is64Bit()) {
     if (Subtarget->isTargetDarwin()) {
       if (isEH)
@@ -89,9 +96,8 @@ int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
   return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
 }
 
-// getX86RegNum - This function maps LLVM register identifiers to their X86
-// specific numbering, which is used in various places encoding instructions.
-//
+/// getX86RegNum - This function maps LLVM register identifiers to their X86
+/// specific numbering, which is used in various places encoding instructions.
 unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
   switch(RegNo) {
   case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
@@ -128,23 +134,53 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
   case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
     return RegNo-X86::ST0;
 
-  case X86::XMM0: case X86::XMM8: case X86::MM0:
+  case X86::XMM0: case X86::XMM8:
+  case X86::YMM0: case X86::YMM8: case X86::MM0:
     return 0;
-  case X86::XMM1: case X86::XMM9: case X86::MM1:
+  case X86::XMM1: case X86::XMM9:
+  case X86::YMM1: case X86::YMM9: case X86::MM1:
     return 1;
-  case X86::XMM2: case X86::XMM10: case X86::MM2:
+  case X86::XMM2: case X86::XMM10:
+  case X86::YMM2: case X86::YMM10: case X86::MM2:
     return 2;
-  case X86::XMM3: case X86::XMM11: case X86::MM3:
+  case X86::XMM3: case X86::XMM11:
+  case X86::YMM3: case X86::YMM11: case X86::MM3:
     return 3;
-  case X86::XMM4: case X86::XMM12: case X86::MM4:
+  case X86::XMM4: case X86::XMM12:
+  case X86::YMM4: case X86::YMM12: case X86::MM4:
     return 4;
-  case X86::XMM5: case X86::XMM13: case X86::MM5:
+  case X86::XMM5: case X86::XMM13:
+  case X86::YMM5: case X86::YMM13: case X86::MM5:
     return 5;
-  case X86::XMM6: case X86::XMM14: case X86::MM6:
+  case X86::XMM6: case X86::XMM14:
+  case X86::YMM6: case X86::YMM14: case X86::MM6:
     return 6;
-  case X86::XMM7: case X86::XMM15: case X86::MM7:
+  case X86::XMM7: case X86::XMM15:
+  case X86::YMM7: case X86::YMM15: case X86::MM7:
     return 7;
 
+  case X86::ES: return 0;
+  case X86::CS: return 1;
+  case X86::SS: return 2;
+  case X86::DS: return 3;
+  case X86::FS: return 4;
+  case X86::GS: return 5;
+
+  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
+  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
+  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
+  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
+  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
+  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
+  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
+  case X86::CR7: case X86::CR15: case X86::DR7: return 7;
+
+  // Pseudo index registers are equivalent to a "none"
+  // scaled index (See Intel Manual 2A, table 2-3)
+  case X86::EIZ:
+  case X86::RIZ:
+    return 4;
+
   default:
     assert(isVirtualRegister(RegNo) && "Unknown physical register!");
     llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
@@ -158,84 +194,133 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                           unsigned SubIdx) const {
   switch (SubIdx) {
   default: return 0;
-  case 1:
-    // 8-bit
+  case X86::sub_8bit:
     if (B == &X86::GR8RegClass) {
-      if (A == &X86::GR64RegClass)
-        return &X86::GR64RegClass;
-      else if (A == &X86::GR32RegClass)
-        return &X86::GR32RegClass;
-      else if (A == &X86::GR16RegClass)
-        return &X86::GR16RegClass;
+      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
+        return A;
     } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
+               A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
-      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass)
+      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
+               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
     } else if (B == &X86::GR8_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass)
+      else if (A == &X86::GR64_ABCDRegClass)
+        return &X86::GR64_ABCDRegClass;
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_NOREXRegClass;
+      else if (A == &X86::GR32_ABCDRegClass)
+        return &X86::GR32_ABCDRegClass;
       else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
         return &X86::GR16_NOREXRegClass;
+      else if (A == &X86::GR16_ABCDRegClass)
+        return &X86::GR16_ABCDRegClass;
     }
     break;
-  case 2:
-    // 8-bit hi
+  case X86::sub_8bit_hi:
     if (B == &X86::GR8_ABCD_HRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
+               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
-      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass)
+      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
+               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
     }
     break;
-  case 3:
-    // 16-bit
+  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
-      if (A == &X86::GR64RegClass)
-        return &X86::GR64RegClass;
-      else if (A == &X86::GR32RegClass)
-        return &X86::GR32RegClass;
+      if (A->getSize() == 4 || A->getSize() == 8)
+        return A;
     } else if (B == &X86::GR16_ABCDRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_ABCDRegClass;
-      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
+               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
         return &X86::GR32_ABCDRegClass;
     } else if (B == &X86::GR16_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
-      else if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
+      else if (A == &X86::GR64_ABCDRegClass)
+        return &X86::GR64_ABCDRegClass;
+      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
+               A == &X86::GR32_NOSPRegClass)
+        return &X86::GR32_NOREXRegClass;
+      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
     }
    break;
-  case 4:
-    // 32-bit
+  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
-      if (A == &X86::GR64RegClass)
-        return &X86::GR64RegClass;
+      if (A->getSize() == 8)
+        return A;
+    } else if (B == &X86::GR32_NOSPRegClass) {
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
+        return &X86::GR64_NOSPRegClass;
+      if (A->getSize() == 8)
+        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
     } else if (B == &X86::GR32_ABCDRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
+          A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass ||
+          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
     } else if (B == &X86::GR32_NOREXRegClass) {
-      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
+      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
+          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
         return &X86::GR64_NOREXRegClass;
+      else if (A == &X86::GR64_ABCDRegClass)
+        return &X86::GR64_ABCDRegClass;
     }
    break;
+  case X86::sub_ss:
+    if (B == &X86::FR32RegClass)
+      return A;
+    break;
+  case X86::sub_sd:
+    if (B == &X86::FR64RegClass)
+      return A;
+    break;
+  case X86::sub_xmm:
+    if (B == &X86::VR128RegClass)
+      return A;
+    break;
   }
   return 0;
 }
 
-const TargetRegisterClass *X86RegisterInfo::getPointerRegClass() const {
-  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
-  if (Subtarget->is64Bit())
-    return &X86::GR64RegClass;
-  else
+const TargetRegisterClass *
+X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
+  switch (Kind) {
+  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
+  case 0: // Normal GPRs.
+    if (TM.getSubtarget<X86Subtarget>().is64Bit())
+      return &X86::GR64RegClass;
     return &X86::GR32RegClass;
+  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
+    if (TM.getSubtarget<X86Subtarget>().is64Bit())
+      return &X86::GR64_NOSPRegClass;
+    return &X86::GR32_NOSPRegClass;
+  }
 }
 
 const TargetRegisterClass *
@@ -252,13 +337,18 @@ X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
 const unsigned *
 X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   bool callsEHReturn = false;
+  bool ghcCall = false;
 
   if (MF) {
-    const MachineFrameInfo *MFI = MF->getFrameInfo();
-    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
-    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
+    callsEHReturn = MF->getMMI().callsEHReturn();
+    const Function *F = MF->getFunction();
+    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
   }
 
+  static const unsigned GhcCalleeSavedRegs[] = {
+    0
+  };
+
   static const unsigned CalleeSavedRegs32Bit[] = {
     X86::ESI, X86::EDI, X86::EBX, X86::EBP,
     0
   };
@@ -284,7 +374,9 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
     X86::XMM14, X86::XMM15,
     0
   };
 
-  if (Is64Bit) {
+  if (ghcCall) {
+    return GhcCalleeSavedRegs;
+  } else if (Is64Bit) {
     if (IsWin64)
       return CalleeSavedRegsWin64;
     else
@@ -294,78 +386,32 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   }
 }
 
-const TargetRegisterClass* const*
-X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
-  bool callsEHReturn = false;
-
-  if (MF) {
-    const MachineFrameInfo *MFI = MF->getFrameInfo();
-    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
-    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
-  }
-
-  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
-    &X86::GR32RegClass, &X86::GR32RegClass,
-    &X86::GR32RegClass, &X86::GR32RegClass, 0
-  };
-  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
-    &X86::GR32RegClass, &X86::GR32RegClass,
-    &X86::GR32RegClass, &X86::GR32RegClass,
-    &X86::GR32RegClass, &X86::GR32RegClass, 0
-  };
-  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass, 0
-  };
-  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass, 0
-  };
-  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::GR64RegClass, &X86::GR64RegClass,
-    &X86::VR128RegClass, &X86::VR128RegClass,
-    &X86::VR128RegClass, &X86::VR128RegClass,
-    &X86::VR128RegClass, &X86::VR128RegClass,
-    &X86::VR128RegClass, &X86::VR128RegClass,
-    &X86::VR128RegClass, &X86::VR128RegClass, 0
-  };
-
-  if (Is64Bit) {
-    if (IsWin64)
-      return CalleeSavedRegClassesWin64;
-    else
-      return (callsEHReturn ?
-              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
-  } else {
-    return (callsEHReturn ?
-            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
-  }
-}
-
 BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
+
   // Set the stack-pointer register and its aliases as reserved.
   Reserved.set(X86::RSP);
   Reserved.set(X86::ESP);
   Reserved.set(X86::SP);
   Reserved.set(X86::SPL);
+
+  // Set the instruction pointer register and its aliases as reserved.
+  Reserved.set(X86::RIP);
+  Reserved.set(X86::EIP);
+  Reserved.set(X86::IP);
+
   // Set the frame-pointer register and its aliases as reserved if needed.
-  if (hasFP(MF)) {
+  if (TFI->hasFP(MF)) {
     Reserved.set(X86::RBP);
     Reserved.set(X86::EBP);
     Reserved.set(X86::BP);
     Reserved.set(X86::BPL);
   }
-  // Mark the x87 stack registers as reserved, since they don't
-  // behave normally with respect to liveness. We don't fully
-  // model the effects of x87 stack pushes and pops after
-  // stackification.
+
+  // Mark the x87 stack registers as reserved, since they don't behave normally
+  // with respect to liveness. We don't fully model the effects of x87 stack
+  // pushes and pops after stackification.
   Reserved.set(X86::ST0);
   Reserved.set(X86::ST1);
   Reserved.set(X86::ST2);
@@ -381,96 +427,73 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
 // Stack Frame Processing methods
 //===----------------------------------------------------------------------===//
 
-static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
-  unsigned MaxAlign = 0;
-
-  for (int i = FFI->getObjectIndexBegin(),
-         e = FFI->getObjectIndexEnd(); i != e; ++i) {
-    if (FFI->isDeadObjectIndex(i))
-      continue;
-
-    unsigned Align = FFI->getObjectAlignment(i);
-    MaxAlign = std::max(MaxAlign, Align);
-  }
-
-  return MaxAlign;
-}
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register.  This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-//
-bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
+bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
-  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
-
-  return (NoFramePointerElim ||
-          needsStackRealignment(MF) ||
-          MFI->hasVarSizedObjects() ||
-          MFI->isFrameAddressTaken() ||
-          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
-          (MMI && MMI->callsUnwindInit()));
+  return (RealignStack &&
+          !MFI->hasVarSizedObjects());
 }
 
 bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const Function *F = MF.getFunction();
+  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+                               F->hasFnAttr(Attribute::StackAlignment));
 
   // FIXME: Currently we don't support stack realignment for functions with
-  // variable-sized allocas
-  return (RealignStack &&
-          (MFI->getMaxAlignment() > StackAlign &&
-           !MFI->hasVarSizedObjects()));
-}
-
-bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
-  return !MF.getFrameInfo()->hasVarSizedObjects();
-}
-
-bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
-                                           int &FrameIdx) const {
-  if (Reg == FramePtr && hasFP(MF)) {
+  // variable-sized allocas.
+  // FIXME: It's more complicated than this...
+  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
+    report_fatal_error(
+      "Stack realignment in presence of dynamic allocas is not supported");
+
+  // If we've requested that we force align the stack do so now.
+  if (ForceStackAlign)
+    return canRealignStack(MF);
+
+  return requiresRealignment && canRealignStack(MF);
+}
+
+bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
+                                           unsigned Reg, int &FrameIdx) const {
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
+
+  if (Reg == FramePtr && TFI->hasFP(MF)) {
     FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
     return true;
   }
 
   return false;
 }
 
-int
-X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
-  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
-  uint64_t StackSize = MF.getFrameInfo()->getStackSize();
-
-  if (needsStackRealignment(MF)) {
-    if (FI < 0)
-      // Skip the saved EBP
-      Offset += SlotSize;
-    else {
-      unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
-      assert( (-(Offset + StackSize)) % Align == 0);
-      Align = 0;
-      return Offset + StackSize;
-    }
-
-    // FIXME: Support tail calls
+static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
+  if (is64Bit) {
+    if (isInt<8>(Imm))
+      return X86::SUB64ri8;
+    return X86::SUB64ri32;
   } else {
-    if (!hasFP(MF))
-      return Offset + StackSize;
-
-    // Skip the saved EBP
-    Offset += SlotSize;
-
-    // Skip the RETADDR move area
-    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
-    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
-    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
+    if (isInt<8>(Imm))
+      return X86::SUB32ri8;
+    return X86::SUB32ri;
   }
+}
 
-  return Offset;
+static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
+  if (is64Bit) {
+    if (isInt<8>(Imm))
+      return X86::ADD64ri8;
+    return X86::ADD64ri32;
+  } else {
+    if (isInt<8>(Imm))
+      return X86::ADD32ri8;
+    return X86::ADD32ri;
+  }
 }
 
 void X86RegisterInfo::
 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) const {
-  if (!hasReservedCallFrame(MF)) {
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
+
+  if (!TFI->hasReservedCallFrame(MF)) {
     // If the stack pointer can be changed after prologue, turn the
     // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
@@ -481,24 +504,27 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     // We need to keep the stack aligned properly.  To do this, we round the
     // amount of space needed for the outgoing arguments up to the next
     // alignment boundary.
-    Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
+    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
 
     MachineInstr *New = 0;
     if (Old->getOpcode() == getCallFrameSetupOpcode()) {
       New = BuildMI(MF, Old->getDebugLoc(),
-                    TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
-                    StackPtr).addReg(StackPtr).addImm(Amount);
+                    TII.get(getSUBriOpcode(Is64Bit, Amount)),
+                    StackPtr)
+        .addReg(StackPtr)
+        .addImm(Amount);
     } else {
       assert(Old->getOpcode() == getCallFrameDestroyOpcode());
-      // factor out the amount the callee already popped.
+
+      // Factor out the amount the callee already popped.
       uint64_t CalleeAmt = Old->getOperand(1).getImm();
       Amount -= CalleeAmt;
-      if (Amount) {
-        unsigned Opc = (Amount < 128) ?
-          (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
-          (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
+
+      if (Amount) {
+        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
         New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
-          .addReg(StackPtr).addImm(Amount);
+          .addReg(StackPtr)
+          .addImm(Amount);
       }
     }
@@ -506,7 +532,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       // The EFLAGS implicit def is dead.
       New->getOperand(3).setIsDead();
 
-      // Replace the pseudo instruction with a new instruction...
+      // Replace the pseudo instruction with a new instruction.
       MBB.insert(I, New);
     }
   }
@@ -515,16 +541,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     // something off the stack pointer, add it back.  We do this until we have
     // more advanced stack pointer tracking ability.
     if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
-      unsigned Opc = (CalleeAmt < 128) ?
-        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
-        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
+      unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
       MachineInstr *Old = I;
       MachineInstr *New =
         BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
-                StackPtr).addReg(StackPtr).addImm(CalleeAmt);
+                StackPtr)
+          .addReg(StackPtr)
+          .addImm(CalleeAmt);
+
       // The EFLAGS implicit def is dead.
       New->getOperand(3).setIsDead();
-
       MBB.insert(I, New);
     }
   }
@@ -532,41 +558,53 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
   MBB.erase(I);
 }
 
-void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
-                                          int SPAdj, RegScavenger *RS) const{
+void
+X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+                                     int SPAdj, RegScavenger *RS) const{
   assert(SPAdj == 0 && "Unexpected");
 
   unsigned i = 0;
   MachineInstr &MI = *II;
   MachineFunction &MF = *MI.getParent()->getParent();
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
+
   while (!MI.getOperand(i).isFI()) {
     ++i;
     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
   }
 
   int FrameIndex = MI.getOperand(i).getIndex();
-
   unsigned BasePtr;
+
+  unsigned Opc = MI.getOpcode();
+  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
   if (needsStackRealignment(MF))
     BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
+  else if (AfterFPPop)
+    BasePtr = StackPtr;
   else
-    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);
+    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);
 
   // This must be part of a four operand memory reference.  Replace the
   // FrameIndex with base register with EBP.  Add an offset to the offset.
   MI.getOperand(i).ChangeToRegister(BasePtr, false);
 
   // Now add the frame object offset to the offset from EBP.
+  int FIOffset;
+  if (AfterFPPop) {
+    // Tail call jmp happens after FP is popped.
+    const MachineFrameInfo *MFI = MF.getFrameInfo();
+    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
+  } else
+    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
+
   if (MI.getOperand(i+3).isImm()) {
     // Offset is a 32-bit integer.
-    int Offset = getFrameIndexOffset(MF, FrameIndex) +
-                 (int)(MI.getOperand(i+3).getImm());
-
-    MI.getOperand(i+3).ChangeToImmediate(Offset);
+    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
+    MI.getOperand(i + 3).ChangeToImmediate(Offset);
   } else {
     // Offset is symbolic. This is extremely rare.
-    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
-                      (uint64_t)MI.getOperand(i+3).getOffset();
+    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
     MI.getOperand(i+3).setOffset(Offset);
   }
 }
@@ -574,17 +612,12 @@ void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 void
 X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
-  MachineFrameInfo *FFI = MF.getFrameInfo();
-
-  // Calculate and set max stack object alignment early, so we can decide
-  // whether we will need stack realignment (and thus FP).
-  unsigned MaxAlign = std::max(FFI->getMaxAlignment(),
-                               calculateMaxStackAlignment(FFI));
-
-  FFI->setMaxAlignment(MaxAlign);
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
 
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+
   if (TailCallReturnAddrDelta < 0) {
     // create RETURNADDR area
     //   arg
@@ -595,590 +628,35 @@ X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
     //   ...
     // }
     // [EBP]
-    MF.getFrameInfo()->
-      CreateFixedObject(-TailCallReturnAddrDelta,
-                        (-1*SlotSize)+TailCallReturnAddrDelta);
+    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
+                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
   }
-  if (hasFP(MF)) {
+
+  if (TFI->hasFP(MF)) {
     assert((TailCallReturnAddrDelta <= 0) &&
            "The Delta should always be zero or negative");
+    const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
+
     // Create a frame entry for the EBP register that must be saved.
-    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
-                                                        (int)SlotSize * -2+
-                                                        TailCallReturnAddrDelta);
-    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
+    int FrameIdx = MFI->CreateFixedObject(SlotSize,
+                                          -(int)SlotSize +
+                                          TFI.getOffsetOfLocalArea() +
+                                          TailCallReturnAddrDelta,
+                                          true);
+    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
     FrameIdx = 0;
   }
 }
 
-/// emitSPUpdate - Emit a series of instructions to increment / decrement the
-/// stack pointer by a constant value.
-static
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
-                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
-                  const TargetInstrInfo &TII) {
-  bool isSub = NumBytes < 0;
-  uint64_t Offset = isSub ? -NumBytes : NumBytes;
-  unsigned Opc = isSub
-    ? ((Offset < 128) ?
-       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
-       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
-    : ((Offset < 128) ?
-       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
-       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
-  uint64_t Chunk = (1LL << 31) - 1;
-  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
-                 DebugLoc::getUnknownLoc());
-
-  while (Offset) {
-    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
-    MachineInstr *MI =
-      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
-        .addReg(StackPtr).addImm(ThisVal);
-    // The EFLAGS implicit def is dead.
-    MI->getOperand(3).setIsDead();
-    Offset -= ThisVal;
-  }
-}
-
-// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator.
-static
-void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
-                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
-  if (MBBI == MBB.begin()) return;
-
-  MachineBasicBlock::iterator PI = prior(MBBI);
-  unsigned Opc = PI->getOpcode();
-  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
-       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
-      PI->getOperand(0).getReg() == StackPtr) {
-    if (NumBytes)
-      *NumBytes += PI->getOperand(2).getImm();
-    MBB.erase(PI);
-  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
-              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
-             PI->getOperand(0).getReg() == StackPtr) {
-    if (NumBytes)
-      *NumBytes -= PI->getOperand(2).getImm();
-    MBB.erase(PI);
-  }
-}
-
-// mergeSPUpdatesUp - Merge two stack-manipulating instructions lower iterator.
-static
-void mergeSPUpdatesDown(MachineBasicBlock &MBB,
-                        MachineBasicBlock::iterator &MBBI,
-                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
-  return;
-
-  if (MBBI == MBB.end()) return;
-
-  MachineBasicBlock::iterator NI = next(MBBI);
-  if (NI == MBB.end()) return;
-
-  unsigned Opc = NI->getOpcode();
-  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
-       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
-      NI->getOperand(0).getReg() == StackPtr) {
-    if (NumBytes)
-      *NumBytes -= NI->getOperand(2).getImm();
-    MBB.erase(NI);
-    MBBI = NI;
-  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
-              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
-             NI->getOperand(0).getReg() == StackPtr) {
-    if (NumBytes)
-      *NumBytes += NI->getOperand(2).getImm();
-    MBB.erase(NI);
-    MBBI = NI;
-  }
-}
-
-/// mergeSPUpdates - Checks the instruction before/after the passed
-/// instruction. If it is an ADD/SUB instruction it is deleted
-/// argument and the stack adjustment is returned as a positive value for ADD
-/// and a negative for SUB.
-static int mergeSPUpdates(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator &MBBI,
-                           unsigned StackPtr,
-                           bool doMergeWithPrevious) {
-
-  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
-      (!doMergeWithPrevious && MBBI == MBB.end()))
-    return 0;
-
-  int Offset = 0;
-
-  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
-  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
-  unsigned Opc = PI->getOpcode();
-  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
-       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
-      PI->getOperand(0).getReg() == StackPtr){
-    Offset += PI->getOperand(2).getImm();
-    MBB.erase(PI);
-    if (!doMergeWithPrevious) MBBI = NI;
-  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
-              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
-             PI->getOperand(0).getReg() == StackPtr) {
-    Offset -= PI->getOperand(2).getImm();
-    MBB.erase(PI);
-    if (!doMergeWithPrevious) MBBI = NI;
-  }
-
-  return Offset;
-}
-
-void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
-                                                unsigned LabelId,
-                                                unsigned FramePtr) const {
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
-  if (!MMI) return;
-
-  // Add callee saved registers to move list.
-  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-  if (CSI.empty()) return;
-
-  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
-  const TargetData *TD = MF.getTarget().getTargetData();
-  bool HasFP = hasFP(MF);
-
-  // Calculate amount of bytes used for return address storing
-  int stackGrowth =
-    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
-     TargetFrameInfo::StackGrowsUp ?
-     TD->getPointerSize() : -TD->getPointerSize());
-
-  // FIXME: This is dirty hack. The code itself is pretty mess right now.
-  // It should be rewritten from scratch and generalized sometimes.
-
-  // Determine maximum offset (minumum due to stack growth)
-  int64_t MaxOffset = 0;
-  for (std::vector<CalleeSavedInfo>::const_iterator
-         I = CSI.begin(), E = CSI.end(); I != E; ++I)
-    MaxOffset = std::min(MaxOffset,
-                         MFI->getObjectOffset(I->getFrameIdx()));
-
-  // Calculate offsets.
-  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
-  for (std::vector<CalleeSavedInfo>::const_iterator
-         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
-    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
-    unsigned Reg = I->getReg();
-    Offset = MaxOffset - Offset + saveAreaOffset;
-
-    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
-    MachineLocation CSSrc(Reg);
-    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
-  }
-}
-
-void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
-  MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  const Function* Fn = MF.getFunction();
-  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
-  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
-  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
-  MachineBasicBlock::iterator MBBI = MBB.begin();
-  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
-                          !Fn->doesNotThrow() ||
-                          UnwindTablesMandatory;
-  bool HasFP = hasFP(MF);
-  DebugLoc DL;
-
-  // Get the number of bytes to allocate from the FrameInfo.
-  uint64_t StackSize = MFI->getStackSize();
-
-  // Get desired stack alignment
-  uint64_t MaxAlign = MFI->getMaxAlignment();
-
-  // Add RETADDR move area to callee saved frame size.
-  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
-  if (TailCallReturnAddrDelta < 0)
-    X86FI->setCalleeSavedFrameSize(
-      X86FI->getCalleeSavedFrameSize() +(-TailCallReturnAddrDelta));
-
-  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
-  // function, and use up to 128 bytes of stack space, don't have a frame
-  // pointer, calls, or dynamic alloca then we do not need to adjust the
-  // stack pointer (we fit in the Red Zone).
-  bool DisableRedZone = Fn->hasFnAttr(Attribute::NoRedZone);
-  if (Is64Bit && !DisableRedZone &&
-      !needsStackRealignment(MF) &&
-      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
-      !MFI->hasCalls() &&                          // No calls.
-      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone
-    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
-    if (HasFP) MinSize += SlotSize;
-    StackSize = std::max(MinSize,
-                         StackSize > 128 ? StackSize - 128 : 0);
-    MFI->setStackSize(StackSize);
-  }
-
-  // Insert stack pointer adjustment for later moving of return addr.  Only
-  // applies to tail call optimized functions where the callee argument stack
-  // size is bigger than the callers.
-  if (TailCallReturnAddrDelta < 0) {
-    MachineInstr *MI =
-      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
-              StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
-    // The EFLAGS implicit def is dead.
-    MI->getOperand(3).setIsDead();
-  }
-
-  //  uint64_t StackSize = MFI->getStackSize();
-  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
-  const TargetData *TD = MF.getTarget().getTargetData();
-  int stackGrowth =
-    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
-     TargetFrameInfo::StackGrowsUp ?
-     TD->getPointerSize() : -TD->getPointerSize());
-
-  uint64_t NumBytes = 0;
-  if (HasFP) {
-    // Calculate required stack adjustment
-    uint64_t FrameSize = StackSize - SlotSize;
-    if (needsStackRealignment(MF))
-      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
-
-    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
-
-    // Get the offset of the stack slot for the EBP register, which is
-    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
-    // Update the frame offset adjustment.
-    MFI->setOffsetAdjustment(-NumBytes);
-
-    // Save EBP/RBP into the appropriate stack slot...
-    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
-      .addReg(FramePtr, RegState::Kill);
-
-    if (needsFrameMoves) {
-      // Mark effective beginning of when frame pointer becomes valid.
-      unsigned FrameLabelId = MMI->NextLabelID();
-      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
-
-      // Define the current CFA rule to use the provided offset.
-      if (StackSize) {
-        MachineLocation SPDst(MachineLocation::VirtualFP);
-        MachineLocation SPSrc(MachineLocation::VirtualFP,
-                              HasFP ? 2 * stackGrowth :
-                                      -StackSize + stackGrowth);
-        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
-      } else {
-        // FIXME: Verify & implement for FP
-        MachineLocation SPDst(StackPtr);
-        MachineLocation SPSrc(StackPtr, stackGrowth);
-        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
-      }
-
-      // Change the rule for the FramePtr to be an "offset" rule.
-      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
-      MachineLocation FPSrc(FramePtr);
-      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
-    }
-
-    // Update EBP with the new base value...
-    BuildMI(MBB, MBBI, DL,
-            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
-      .addReg(StackPtr);
-
-    if (needsFrameMoves) {
-      unsigned FrameLabelId = MMI->NextLabelID();
-      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
-
-      // Define the current CFA to use the EBP/RBP register.
-      MachineLocation FPDst(FramePtr);
-      MachineLocation FPSrc(MachineLocation::VirtualFP);
-      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
-    }
-
-    // Mark the FramePtr as live-in in every block except the entry.
-    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
-         I != E; ++I)
-      I->addLiveIn(FramePtr);
-
-    // Realign stack
-    if (needsStackRealignment(MF)) {
-      MachineInstr *MI =
-        BuildMI(MBB, MBBI, DL,
-                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
-                StackPtr).addReg(StackPtr).addImm(-MaxAlign);
-
-      // The EFLAGS implicit def is dead.
-      MI->getOperand(3).setIsDead();
-    }
-  } else {
-    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
-  }
-
-  // Skip the callee-saved push instructions.
-  bool RegsSaved = false;
-  while (MBBI != MBB.end() &&
-         (MBBI->getOpcode() == X86::PUSH32r ||
-          MBBI->getOpcode() == X86::PUSH64r)) {
-    RegsSaved = true;
-    ++MBBI;
-  }
-
-  if (RegsSaved && needsFrameMoves) {
-    // Mark end of callee-saved push instructions.
-    unsigned LabelId = MMI->NextLabelID();
-    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
-
-    // Emit DWARF info specifying the offsets of the callee-saved registers.
-    emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
-  }
-
-  if (MBBI != MBB.end())
-    DL = MBBI->getDebugLoc();
-
-  // Adjust stack pointer: ESP -= numbytes.
-  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
-    // Check, whether EAX is livein for this function.
-    bool isEAXAlive = false;
-    for (MachineRegisterInfo::livein_iterator
-           II = MF.getRegInfo().livein_begin(),
-           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
-      unsigned Reg = II->first;
-      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
-                    Reg == X86::AH || Reg == X86::AL);
-    }
-
-    // Function prologue calls _alloca to probe the stack when allocating more
-    // than 4k bytes in one go. Touching the stack at 4K increments is necessary
-    // to ensure that the guard pages used by the OS virtual memory manager are
-    // allocated in correct sequence.
-    if (!isEAXAlive) {
-      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
-        .addImm(NumBytes);
-      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
-        .addExternalSymbol("_alloca");
-    } else {
-      // Save EAX
-      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
-        .addReg(X86::EAX, RegState::Kill);
-
-      // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
-      // allocated bytes for EAX.
-      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
-        .addImm(NumBytes - 4);
-      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
-        .addExternalSymbol("_alloca");
-
-      // Restore EAX
-      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
-                                              X86::EAX),
-                                      StackPtr, false, NumBytes - 4);
-      MBB.insert(MBBI, MI);
-    }
-  } else if (NumBytes) {
-    // If there is an SUB32ri of ESP immediately before this instruction, merge
-    // the two. This can be the case when tail call elimination is enabled and
-    // the callee has more arguments then the caller.
-    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
-
-    // If there is an ADD32ri or SUB32ri of ESP immediately after this
-    // instruction, merge the two instructions.
-    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
-
-    if (NumBytes)
-      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
-  }
-
-  if (!HasFP && needsFrameMoves && NumBytes) {
-    // Mark end of stack pointer adjustment.
-    unsigned LabelId = MMI->NextLabelID();
-    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
-
-    // Define the current CFA rule to use the provided offset.
-    if (StackSize) {
-      MachineLocation SPDst(MachineLocation::VirtualFP);
-      MachineLocation SPSrc(MachineLocation::VirtualFP,
-                            -StackSize + stackGrowth);
-      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
-    } else {
-      // FIXME: Verify & implement for FP
-      MachineLocation SPDst(StackPtr);
-      MachineLocation SPSrc(StackPtr, stackGrowth);
-      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
-    }
-  }
-}
-
-void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
-                                   MachineBasicBlock &MBB) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
-  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
-  MachineBasicBlock::iterator MBBI = prior(MBB.end());
-  unsigned RetOpcode = MBBI->getOpcode();
-  DebugLoc DL = MBBI->getDebugLoc();
-
-  switch (RetOpcode) {
-  case X86::RET:
-  case X86::RETI:
-  case X86::TCRETURNdi:
-  case X86::TCRETURNri:
-  case X86::TCRETURNri64:
-  case X86::TCRETURNdi64:
-  case X86::EH_RETURN:
-  case X86::EH_RETURN64:
-  case X86::TAILJMPd:
-  case X86::TAILJMPr:
-  case X86::TAILJMPm: break;  // These are ok
-  default:
-    llvm_unreachable("Can only insert epilog into returning blocks");
-  }
-
-  // Get the number of bytes to allocate from the FrameInfo
-  uint64_t StackSize = MFI->getStackSize();
-  uint64_t MaxAlign = MFI->getMaxAlignment();
-  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
-  uint64_t NumBytes = 0;
-
-  if (hasFP(MF)) {
-    // Calculate required stack adjustment
-    uint64_t FrameSize = StackSize - SlotSize;
-    if (needsStackRealignment(MF))
-      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
-
-    NumBytes = FrameSize - CSSize;
-
-    // pop EBP.
-    BuildMI(MBB, MBBI, DL,
-            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
-  } else {
-    NumBytes = StackSize - CSSize;
-  }
-
-  // Skip the callee-saved pop instructions.
-  MachineBasicBlock::iterator LastCSPop = MBBI;
-  while (MBBI != MBB.begin()) {
-    MachineBasicBlock::iterator PI = prior(MBBI);
-    unsigned Opc = PI->getOpcode();
-    if (Opc != X86::POP32r && Opc != X86::POP64r &&
-        !PI->getDesc().isTerminator())
-      break;
-    --MBBI;
-  }
-
-  DL = MBBI->getDebugLoc();
-
-  // If there is an ADD32ri or SUB32ri of ESP immediately before this
-  // instruction, merge the two instructions.
-  if (NumBytes || MFI->hasVarSizedObjects())
-    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
-
-  // If dynamic alloca is used, then reset esp to point to the last callee-saved
-  // slot before popping them off! Same applies for the case, when stack was
-  // realigned
-  if (needsStackRealignment(MF)) {
-    // We cannot use LEA here, because stack pointer was realigned. We need to
-    // deallocate local frame back
-    if (CSSize) {
-      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
-      MBBI = prior(LastCSPop);
-    }
-
-    BuildMI(MBB, MBBI, DL,
-            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
-            StackPtr).addReg(FramePtr);
-  } else if (MFI->hasVarSizedObjects()) {
-    if (CSSize) {
-      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
-      MachineInstr *MI = addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
-                                         FramePtr, false, -CSSize);
-      MBB.insert(MBBI, MI);
-    } else
-      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
-              StackPtr).addReg(FramePtr);
-
-  } else {
-    // adjust stack pointer back: ESP += numbytes
-    if (NumBytes)
-      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
-  }
-
-  // We're returning from function via eh_return.
-  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
-    MBBI = prior(MBB.end());
-    MachineOperand &DestAddr = MBBI->getOperand(0);
-    assert(DestAddr.isReg() && "Offset should be in register!");
-    BuildMI(MBB, MBBI, DL,
-            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
-            StackPtr).addReg(DestAddr.getReg());
-  // Tail call return: adjust the stack pointer and jump to callee
-  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
-             RetOpcode== X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
-    MBBI = prior(MBB.end());
-    MachineOperand &JumpTarget = MBBI->getOperand(0);
-    MachineOperand &StackAdjust = MBBI->getOperand(1);
-    assert(StackAdjust.isImm() && "Expecting immediate value.");
-
-    // Adjust stack pointer.
-    int StackAdj = StackAdjust.getImm();
-    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
-    int Offset = 0;
-    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
-    // Incoporate the retaddr area.
-    Offset = StackAdj-MaxTCDelta;
-    assert(Offset >= 0 && "Offset should never be negative");
-
-    if (Offset) {
-      // Check for possible merge with preceeding ADD instruction.
-      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
-      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
-    }
-
-    // Jump to label or value in register.
-    if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64)
-      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
-        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
-    else if (RetOpcode== X86::TCRETURNri64)
-      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
-    else
-      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
-
-    // Delete the pseudo instruction TCRETURN.
-    MBB.erase(MBBI);
-  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
-             (X86FI->getTCReturnAddrDelta() < 0)) {
-    // Add the return addr area delta back since we are not tail calling.
-    int delta = -1*X86FI->getTCReturnAddrDelta();
-    MBBI = prior(MBB.end());
-
-    // Check for possible merge with preceeding ADD instruction.
-    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
-    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
-  }
-}
-
 unsigned X86RegisterInfo::getRARegister() const {
-  if (Is64Bit)
-    return X86::RIP; // Should have dwarf #16
-  else
-    return X86::EIP; // Should have dwarf #8
-}
-
-unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
-  return hasFP(MF) ? FramePtr : StackPtr;
+  return Is64Bit ? X86::RIP     // Should have dwarf #16.
+                 : X86::EIP;    // Should have dwarf #8.
 }
 
-void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
-  const {
-  // Calculate amount of bytes used for return address storing
-  int stackGrowth = (Is64Bit ? -8 : -4);
-
-  // Initial state of the frame pointer is esp+4.
-  MachineLocation Dst(MachineLocation::VirtualFP);
-  MachineLocation Src(StackPtr, stackGrowth);
-  Moves.push_back(MachineMove(0, Dst, Src));
-
-  // Add return address to move list
-  MachineLocation CSDst(StackPtr, stackGrowth);
-  MachineLocation CSSrc(getRARegister());
-  Moves.push_back(MachineMove(0, CSDst, CSSrc));
+unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();
+  return TFI->hasFP(MF) ? FramePtr : StackPtr;
 }
 
 unsigned X86RegisterInfo::getEHExceptionRegister() const {
@@ -1192,8 +670,8 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const {
 }
 
 namespace llvm {
-unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
-  switch (VT.getSimpleVT()) {
+unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
+  switch (VT.getSimpleVT().SimpleTy) {
   default: return Reg;
   case MVT::i8:
     if (High) {
@@ -1362,36 +840,44 @@ unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
 
 #include "X86GenRegisterInfo.inc"
 
 namespace {
-  struct VISIBILITY_HIDDEN MSAC : public MachineFunctionPass {
+  struct MSAH : public MachineFunctionPass {
     static char ID;
-    MSAC() : MachineFunctionPass(&ID) {}
+    MSAH() : MachineFunctionPass(ID) {}
 
     virtual bool runOnMachineFunction(MachineFunction &MF) {
-      MachineFrameInfo *FFI = MF.getFrameInfo();
+      const X86TargetMachine *TM =
+        static_cast<const X86TargetMachine *>(&MF.getTarget());
+      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
       MachineRegisterInfo &RI = MF.getRegInfo();
+      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+      unsigned StackAlignment = X86RI->getStackAlignment();
 
-      // Calculate max stack alignment of all already allocated stack objects.
-      unsigned MaxAlign = calculateMaxStackAlignment(FFI);
-
-      // Be over-conservative: scan over all vreg defs and find, whether vector
-      // registers are used. If yes - there is probability, that vector register
-      // will be spilled and thus stack needs to be aligned properly.
+      // Be over-conservative: scan over all vreg defs and find whether vector
+      // registers are used. If yes, there is a possibility that vector register
+      // will be spilled and thus require dynamic stack realignment.
       for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
            RegNum < RI.getLastVirtReg(); ++RegNum)
-        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());
-
-      FFI->setMaxAlignment(MaxAlign);
+        if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
+          FuncInfo->setReserveFP(true);
+          return true;
+        }
 
+      // Nothing to do
       return false;
     }
 
     virtual const char *getPassName() const {
-      return "X86 Maximal Stack Alignment Calculator";
+      return "X86 Maximal Stack Alignment Check";
+    }
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.setPreservesCFG();
+      MachineFunctionPass::getAnalysisUsage(AU);
     }
   };
 
-  char MSAC::ID = 0;
+  char MSAH::ID = 0;
 }
 
 FunctionPass*
-llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
+llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }