X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
- : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
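+  // The call-frame pseudo opcodes now come in 32- and 64-bit flavors; pick
+  // the pair matching the subtarget's pointer width so that call-frame
+  // pseudo elimination recognizes them.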
+ : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
+ X86::ADJCALLSTACKDOWN64 :
+ X86::ADJCALLSTACKDOWN32,
+ tm.getSubtarget<X86Subtarget>().is64Bit() ?
+ X86::ADJCALLSTACKUP64 :
+ X86::ADJCALLSTACKUP32),
TM(tm), TII(tii) {
// Cache some information.
const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
}
}
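+// getPointerRegClass - Returns the register class appropriate for pointer
+// values: GR64 in 64-bit mode, GR32 otherwise.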
+const TargetRegisterClass *X86RegisterInfo::getPointerRegClass() const {
+ const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
+ if (Subtarget->is64Bit())
+ return &X86::GR64RegClass;
+ else
+ return &X86::GR32RegClass;
+}
+
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &X86::CCRRegClass) {
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
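+  // Functions that call llvm.eh.return must additionally preserve the EH
+  // data registers (EAX/EDX or RAX/RDX), so they use an extended list.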
+ bool callsEHReturn = false;
+
+ if (MF) {
+ const MachineFrameInfo *MFI = MF->getFrameInfo();
+ const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
+ }
+
static const unsigned CalleeSavedRegs32Bit[] = {
X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
};
X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
};
+ static const unsigned CalleeSavedRegs64EHRet[] = {
+ X86::RAX, X86::RDX, X86::RBX, X86::R12,
+ X86::R13, X86::R14, X86::R15, X86::RBP, 0
+ };
+
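+  // The Win64 ABI also treats XMM6-XMM15 as callee-saved.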
static const unsigned CalleeSavedRegsWin64[] = {
- X86::RBX, X86::RBP, X86::RDI, X86::RSI,
- X86::R12, X86::R13, X86::R14, X86::R15, 0
+ X86::RBX, X86::RBP, X86::RDI, X86::RSI,
+ X86::R12, X86::R13, X86::R14, X86::R15,
+ X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
+ X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
+ X86::XMM14, X86::XMM15, 0
};
if (Is64Bit) {
if (IsWin64)
return CalleeSavedRegsWin64;
else
- return CalleeSavedRegs64Bit;
+ return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
} else {
- if (MF) {
- const MachineFrameInfo *MFI = MF->getFrameInfo();
- const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- if (MMI && MMI->callsEHReturn())
- return CalleeSavedRegs32EHRet;
- }
- return CalleeSavedRegs32Bit;
+ return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
}
}
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
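+  // These class lists must stay parallel, entry for entry, with the
+  // register lists returned by getCalleeSavedRegs above.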
+ bool callsEHReturn = false;
+
+ if (MF) {
+ const MachineFrameInfo *MFI = MF->getFrameInfo();
+ const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
+ }
+
static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
&X86::GR32RegClass, &X86::GR32RegClass,
&X86::GR32RegClass, &X86::GR32RegClass, 0
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass, 0
};
- static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
+ static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass, 0
};
+ static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
+ &X86::GR64RegClass, &X86::GR64RegClass,
+ &X86::GR64RegClass, &X86::GR64RegClass,
+ &X86::GR64RegClass, &X86::GR64RegClass,
+ &X86::GR64RegClass, &X86::GR64RegClass,
+ &X86::VR128RegClass, &X86::VR128RegClass,
+ &X86::VR128RegClass, &X86::VR128RegClass,
+ &X86::VR128RegClass, &X86::VR128RegClass,
+ &X86::VR128RegClass, &X86::VR128RegClass,
+ &X86::VR128RegClass, &X86::VR128RegClass, 0
+ };
if (Is64Bit) {
if (IsWin64)
return CalleeSavedRegClassesWin64;
else
- return CalleeSavedRegClasses64Bit;
+ return (callsEHReturn ?
+ CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
} else {
- if (MF) {
- const MachineFrameInfo *MFI = MF->getFrameInfo();
- const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- if (MMI && MMI->callsEHReturn())
- return CalleeSavedRegClasses32EHRet;
- }
- return CalleeSavedRegClasses32Bit;
+ return (callsEHReturn ?
+ CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
}
-
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ // Set the stack-pointer register and its aliases as reserved.
Reserved.set(X86::RSP);
Reserved.set(X86::ESP);
Reserved.set(X86::SP);
Reserved.set(X86::SPL);
+ // Set the frame-pointer register and its aliases as reserved if needed.
if (hasFP(MF)) {
Reserved.set(X86::RBP);
Reserved.set(X86::EBP);
Reserved.set(X86::BP);
Reserved.set(X86::BPL);
}
+ // Mark the x87 stack registers as reserved, since they don't
+ // behave normally with respect to liveness. We don't fully
+ // model the effects of x87 stack pushes and pops after
+ // stackification.
+ Reserved.set(X86::ST0);
+ Reserved.set(X86::ST1);
+ Reserved.set(X86::ST2);
+ Reserved.set(X86::ST3);
+ Reserved.set(X86::ST4);
+ Reserved.set(X86::ST5);
+ Reserved.set(X86::ST6);
+ Reserved.set(X86::ST7);
return Reserved;
}
return (NoFramePointerElim ||
needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
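+          // Taking the frame address requires keeping the frame pointer.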
+ MFI->isFrameAddressTaken() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
(MMI && MMI->callsUnwindInit()));
}
else {
unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
assert( (-(Offset + StackSize)) % Align == 0);
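+    // Dead store: keeps Align referenced when asserts are compiled out.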
+ Align = 0;
return Offset + StackSize;
}
Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
MachineInstr *New = 0;
- if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
- New = BuildMI(MF, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
+ if (Old->getOpcode() == getCallFrameSetupOpcode()) {
+ New = BuildMI(MF, Old->getDebugLoc(),
+ TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
StackPtr).addReg(StackPtr).addImm(Amount);
} else {
- assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
+ assert(Old->getOpcode() == getCallFrameDestroyOpcode());
// factor out the amount the callee already popped.
uint64_t CalleeAmt = Old->getOperand(1).getImm();
Amount -= CalleeAmt;
unsigned Opc = (Amount < 128) ?
(Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
(Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
- New = BuildMI(MF, TII.get(Opc), StackPtr)
+ New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
.addReg(StackPtr).addImm(Amount);
}
}
- // Replace the pseudo instruction with a new instruction...
- if (New) MBB.insert(I, New);
+ if (New) {
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
+ // Replace the pseudo instruction with a new instruction...
+ MBB.insert(I, New);
+ }
}
- } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
+ } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
unsigned Opc = (CalleeAmt < 128) ?
(Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
+ MachineInstr *Old = I;
MachineInstr *New =
- BuildMI(MF, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
+ BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
+ StackPtr).addReg(StackPtr).addImm(CalleeAmt);
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
MBB.insert(I, New);
}
}
unsigned i = 0;
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
- while (!MI.getOperand(i).isFrameIndex()) {
+ while (!MI.getOperand(i).isFI()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
}
MI.getOperand(i).ChangeToRegister(BasePtr, false);
// Now add the frame object offset to the offset from EBP.
- int64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
- MI.getOperand(i+3).getImm();
-
- MI.getOperand(i+3).ChangeToImmediate(Offset);
+ if (MI.getOperand(i+3).isImm()) {
+ // Offset is a 32-bit integer.
+ int Offset = getFrameIndexOffset(MF, FrameIndex) +
+ (int)(MI.getOperand(i+3).getImm());
+
+ MI.getOperand(i+3).ChangeToImmediate(Offset);
+ } else {
+ // Offset is symbolic. This is extremely rare.
+ uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
+ (uint64_t)MI.getOperand(i+3).getOffset();
+ MI.getOperand(i+3).setOffset(Offset);
+ }
}
void
TailCallReturnAddrDelta);
assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
"Slot for EBP register must be last in order to be found!");
+ FrameIdx = 0;
}
}
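+  // Emit the adjustment in Chunk-sized pieces so that each immediate fits
+  // in the chosen opcode's field.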
while (Offset) {
uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
- BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
Offset -= ThisVal;
}
}
bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
!Fn->doesNotThrow() ||
UnwindTablesMandatory;
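+  // Prologue instructions have no source-level location to attach.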
+ DebugLoc DL = DebugLoc::getUnknownLoc();
// Prepare for frame info.
unsigned FrameLabelId = 0;
X86FI->setCalleeSavedFrameSize(
X86FI->getCalleeSavedFrameSize() +(-TailCallReturnAddrDelta));
+  // If this is x86-64, the Red Zone is not disabled, and we are a leaf
+  // function that uses at most 128 bytes of stack space and has no frame
+  // pointer, calls, or dynamic allocas, then we do not need to adjust the
+  // stack pointer (we fit in the Red Zone).
+ if (Is64Bit && !DisableRedZone &&
+ !needsStackRealignment(MF) &&
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->hasCalls()) { // No calls.
+ uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
+ if (hasFP(MF)) MinSize += SlotSize;
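+    // The first 128 bytes are free in the red zone; only the excess (and
+    // never less than the callee-saved area) needs an SP adjustment.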
+ StackSize = std::max(MinSize,
+ StackSize > 128 ? StackSize - 128 : 0);
+ MFI->setStackSize(StackSize);
+ }
+
// Insert stack pointer adjustment for later moving of return addr. Only
// applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
if (TailCallReturnAddrDelta < 0) {
- BuildMI(MBB, MBBI, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
- StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
+ StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
}
uint64_t NumBytes = 0;
MFI->setOffsetAdjustment(-NumBytes);
// Save EBP into the appropriate stack slot...
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
- .addReg(FramePtr);
-
- if (needsFrameMoves) {
- // Mark effective beginning of when frame pointer becomes valid.
- FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
- }
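+    // The incoming FramePtr value dies at this push; the MOV below gives
+    // it its new value.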
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(FramePtr, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
// Update EBP with the new base value...
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
- .addReg(StackPtr);
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+ .addReg(StackPtr);
+
+ // Mark the FramePtr as live-in in every block except the entry.
+ for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+ I != E; ++I)
+ I->addLiveIn(FramePtr);
// Realign stack
- if (needsStackRealignment(MF))
- BuildMI(MBB, MBBI,
- TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
- StackPtr).addReg(StackPtr).addImm(-MaxAlign);
+ if (needsStackRealignment(MF)) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
+ StackPtr).addReg(StackPtr).addImm(-MaxAlign);
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
} else
NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
unsigned ReadyLabelId = 0;
- if (needsFrameMoves) {
+ if (needsFrameMoves)
// Mark effective beginning of when frame pointer is ready.
ReadyLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(ReadyLabelId);
- }
// Skip the callee-saved push instructions.
while (MBBI != MBB.end() &&
// necessary to ensure that the guard pages used by the OS virtual memory
// manager are allocated in correct sequence.
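+    // The _alloca probe function takes the allocation size in EAX.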
if (!isEAXAlive) {
- BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
- BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("_alloca");
} else {
// Save EAX
- BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
+ .addReg(X86::EAX, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
// Allocate NumBytes-4 bytes on stack. We'll also use 4 already
// allocated bytes for EAX.
- BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
- BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+ BuildMI(MBB, MBBI, DL,
+ TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("_alloca");
// Restore EAX
- MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(X86::MOV32rm),X86::EAX),
+ MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
+ X86::EAX),
StackPtr, false, NumBytes-4);
MBB.insert(MBBI, MI);
}
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = prior(MBB.end());
unsigned RetOpcode = MBBI->getOpcode();
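+  // Epilogue instructions likewise have no source-level location.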
+ DebugLoc DL = DebugLoc::getUnknownLoc();
switch (RetOpcode) {
case X86::RET:
case X86::TCRETURNri64:
case X86::TCRETURNdi64:
case X86::EH_RETURN:
+ case X86::EH_RETURN64:
case X86::TAILJMPd:
case X86::TAILJMPr:
case X86::TAILJMPm: break; // These are ok
NumBytes = FrameSize - CSSize;
// pop EBP.
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
} else
NumBytes = StackSize - CSSize;
MBBI = prior(LastCSPop);
}
- BuildMI(MBB, MBBI,
+ BuildMI(MBB, MBBI, DL,
TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
StackPtr).addReg(FramePtr);
} else if (MFI->hasVarSizedObjects()) {
if (CSSize) {
unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
- MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(Opc), StackPtr),
+ MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
FramePtr, false, -CSSize);
MBB.insert(MBBI, MI);
} else
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
StackPtr).addReg(FramePtr);
} else {
}
  // We're returning from the function via eh_return.
- if (RetOpcode == X86::EH_RETURN) {
+ if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
MBBI = prior(MBB.end());
MachineOperand &DestAddr = MBBI->getOperand(0);
- assert(DestAddr.isRegister() && "Offset should be in register!");
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
- addReg(DestAddr.getReg());
+ assert(DestAddr.isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
+ StackPtr).addReg(DestAddr.getReg());
// Tail call return: adjust the stack pointer and jump to callee
} else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
MBBI = prior(MBB.end());
MachineOperand &JumpTarget = MBBI->getOperand(0);
MachineOperand &StackAdjust = MBBI->getOperand(1);
- assert( StackAdjust.isImmediate() && "Expecting immediate value.");
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();
}
// Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
- BuildMI(MBB, MBBI, TII.get(X86::TAILJMPd)).
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
} else
- BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
} else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&