diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index d618ffdeb78..310290213f7 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -42,7 +42,12 @@ using namespace llvm;
 
 X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                  const TargetInstrInfo &tii)
-  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
+  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
+                         X86::ADJCALLSTACKDOWN64 :
+                         X86::ADJCALLSTACKDOWN32,
+                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
+                         X86::ADJCALLSTACKUP64 :
+                         X86::ADJCALLSTACKUP32),
     TM(tm), TII(tii) {
   // Cache some information.
   const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
@@ -146,6 +151,14 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
   }
 }
 
+const TargetRegisterClass *X86RegisterInfo::getPointerRegClass() const {
+  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
+  if (Subtarget->is64Bit())
+    return &X86::GR64RegClass;
+  else
+    return &X86::GR32RegClass;
+}
+
 const TargetRegisterClass *
 X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
   if (RC == &X86::CCRRegClass) {
@@ -258,16 +271,30 @@ X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
 BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
 
+  // Set the stack-pointer register and its aliases as reserved.
   Reserved.set(X86::RSP);
   Reserved.set(X86::ESP);
   Reserved.set(X86::SP);
   Reserved.set(X86::SPL);
+  // Set the frame-pointer register and its aliases as reserved if needed.
   if (hasFP(MF)) {
     Reserved.set(X86::RBP);
     Reserved.set(X86::EBP);
     Reserved.set(X86::BP);
     Reserved.set(X86::BPL);
   }
+  // Mark the x87 stack registers as reserved, since they don't
+  // behave normally with respect to liveness. We don't fully
+  // model the effects of x87 stack pushes and pops after
+  // stackification.
+  Reserved.set(X86::ST0);
+  Reserved.set(X86::ST1);
+  Reserved.set(X86::ST2);
+  Reserved.set(X86::ST3);
+  Reserved.set(X86::ST4);
+  Reserved.set(X86::ST5);
+  Reserved.set(X86::ST6);
+  Reserved.set(X86::ST7);
   return Reserved;
 }
 
@@ -305,7 +332,7 @@ bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
 }
 
 bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();;
+  const MachineFrameInfo *MFI = MF.getFrameInfo();
 
   // FIXME: Currently we don't support stack realignment for functions with
   //        variable-sized allocas
@@ -330,6 +357,7 @@ X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
   else {
     unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
     assert( (-(Offset + StackSize)) % Align == 0);
+    Align = 0;
     return Offset + StackSize;
   }
 
@@ -367,11 +395,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
 
       MachineInstr *New = 0;
-      if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
-        New = BuildMI(MF, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
+      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
+        New = BuildMI(MF, Old->getDebugLoc(),
+                      TII.get(Is64Bit ? 
X86::SUB64ri32 : X86::SUB32ri), StackPtr).addReg(StackPtr).addImm(Amount); } else { - assert(Old->getOpcode() == X86::ADJCALLSTACKUP); + assert(Old->getOpcode() == getCallFrameDestroyOpcode()); // factor out the amount the callee already popped. uint64_t CalleeAmt = Old->getOperand(1).getImm(); Amount -= CalleeAmt; @@ -379,15 +408,20 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, unsigned Opc = (Amount < 128) ? (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) : (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri); - New = BuildMI(MF, TII.get(Opc), StackPtr) + New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr) .addReg(StackPtr).addImm(Amount); } } - // Replace the pseudo instruction with a new instruction... - if (New) MBB.insert(I, New); + if (New) { + // The EFLAGS implicit def is dead. + New->getOperand(3).setIsDead(); + + // Replace the pseudo instruction with a new instruction... + MBB.insert(I, New); + } } - } else if (I->getOpcode() == X86::ADJCALLSTACKUP) { + } else if (I->getOpcode() == getCallFrameDestroyOpcode()) { // If we are performing frame pointer elimination and if the callee pops // something off the stack pointer, add it back. We do this until we have // more advanced stack pointer tracking ability. @@ -395,8 +429,13 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, unsigned Opc = (CalleeAmt < 128) ? (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) : (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri); + MachineInstr *Old = I; MachineInstr *New = - BuildMI(MF, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt); + BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), + StackPtr).addReg(StackPtr).addImm(CalleeAmt); + // The EFLAGS implicit def is dead. + New->getOperand(3).setIsDead(); + MBB.insert(I, New); } } @@ -411,7 +450,7 @@ void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, unsigned i = 0; MachineInstr &MI = *II; MachineFunction &MF = *MI.getParent()->getParent(); - while (!MI.getOperand(i).isFrameIndex()) { + while (!MI.getOperand(i).isFI()) { ++i; assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); } @@ -428,12 +467,19 @@ void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // FrameIndex with base register with EBP. Add an offset to the offset. MI.getOperand(i).ChangeToRegister(BasePtr, false); - // Now add the frame object offset to the offset from EBP. Offset is a - // 32-bit integer. - int Offset = getFrameIndexOffset(MF, FrameIndex) + - (int)(MI.getOperand(i+3).getImm()); - - MI.getOperand(i+3).ChangeToImmediate(Offset); + // Now add the frame object offset to the offset from EBP. + if (MI.getOperand(i+3).isImm()) { + // Offset is a 32-bit integer. + int Offset = getFrameIndexOffset(MF, FrameIndex) + + (int)(MI.getOperand(i+3).getImm()); + + MI.getOperand(i+3).ChangeToImmediate(Offset); + } else { + // Offset is symbolic. This is extremely rare. + uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) + + (uint64_t)MI.getOperand(i+3).getOffset(); + MI.getOperand(i+3).setOffset(Offset); + } } void @@ -476,6 +522,7 @@ X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{ TailCallReturnAddrDelta); assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() && "Slot for EBP register must be last in order to be found!"); + FrameIdx = 0; } } @@ -495,10 +542,16 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) : (Is64Bit ? 
X86::ADD64ri32 : X86::ADD32ri));
   uint64_t Chunk = (1LL << 31) - 1;
+  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
+                 DebugLoc::getUnknownLoc());
 
   while (Offset) {
     uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
-    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
+    MachineInstr *MI =
+      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+        .addReg(StackPtr).addImm(ThisVal);
+    // The EFLAGS implicit def is dead.
+    MI->getOperand(3).setIsDead();
     Offset -= ThisVal;
   }
 }
 
@@ -676,11 +729,15 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
   bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() ||
                          UnwindTablesMandatory;
+  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
+                 DebugLoc::getUnknownLoc());
+
   // Prepare for frame info.
   unsigned FrameLabelId = 0;
 
   // Get the number of bytes to allocate from the FrameInfo.
   uint64_t StackSize = MFI->getStackSize();
+
   // Get desired stack alignment
   uint64_t MaxAlign = MFI->getMaxAlignment();
 
@@ -690,12 +747,30 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
     X86FI->setCalleeSavedFrameSize(
       X86FI->getCalleeSavedFrameSize() +(-TailCallReturnAddrDelta));
 
+  // If this is x86-64 and the Red Zone is not disabled, and if we are a
+  // leaf function that uses up to 128 bytes of stack space and doesn't
+  // have a frame pointer, calls, or dynamic alloca, then we do not need
+  // to adjust the stack pointer (we fit in the Red Zone).
+  if (Is64Bit && !DisableRedZone &&
+      !needsStackRealignment(MF) &&
+      !MFI->hasVarSizedObjects() &&     // No dynamic alloca.
+      !MFI->hasCalls()) {               // No calls.
+    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
+    if (hasFP(MF)) MinSize += SlotSize;
+    StackSize = std::max(MinSize,
+                         StackSize > 128 ? StackSize - 128 : 0);
+    MFI->setStackSize(StackSize);
+  }
+
   // Insert stack pointer adjustment for later moving of return addr.  Only
   // applies to tail call optimized functions where the callee argument stack
   // size is bigger than the caller's.
   if (TailCallReturnAddrDelta < 0) {
-    BuildMI(MBB, MBBI, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
-            StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
+    MachineInstr *MI =
+      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
+              StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
+    // The EFLAGS implicit def is dead.
+    MI->getOperand(3).setIsDead();
   }
 
   uint64_t NumBytes = 0;
@@ -713,32 +788,43 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
     MFI->setOffsetAdjustment(-NumBytes);
 
     // Save EBP into the appropriate stack slot...
-    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
-      .addReg(FramePtr);
+    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+      .addReg(FramePtr, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
 
     if (needsFrameMoves) {
       // Mark effective beginning of when frame pointer becomes valid.
       FrameLabelId = MMI->NextLabelID();
-      BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
+      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
     }
 
     // Update EBP with the new base value...
-    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
-      .addReg(StackPtr);
+    BuildMI(MBB, MBBI, DL,
+            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+        .addReg(StackPtr);
+
+    // Mark the FramePtr as live-in in every block except the entry.
+    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+         I != E; ++I)
+      I->addLiveIn(FramePtr);
 
     // Realign stack
-    if (needsStackRealignment(MF))
-      BuildMI(MBB, MBBI,
-              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
-              StackPtr).addReg(StackPtr).addImm(-MaxAlign);
-  } else
+    if (needsStackRealignment(MF)) {
+      MachineInstr *MI =
+        BuildMI(MBB, MBBI, DL,
+                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
+                StackPtr).addReg(StackPtr).addImm(-MaxAlign);
+      // The EFLAGS implicit def is dead.
+      MI->getOperand(3).setIsDead();
+    }
+  } else {
     NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
+  }
 
   unsigned ReadyLabelId = 0;
   if (needsFrameMoves) {
     // Mark effective beginning of when frame pointer is ready.
     ReadyLabelId = MMI->NextLabelID();
-    BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(ReadyLabelId);
+    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(ReadyLabelId);
   }
 
   // Skip the callee-saved push instructions.
@@ -747,6 +833,9 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
          MBBI->getOpcode() == X86::PUSH64r))
     ++MBBI;
 
+  if (MBBI != MBB.end())
+    DL = MBBI->getDebugLoc();
+
   if (NumBytes) {   // adjust stack pointer: ESP -= numbytes
     if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
       // Check whether EAX is livein for this function
@@ -764,19 +853,23 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
       // necessary to ensure that the guard pages used by the OS virtual memory
      // manager are allocated in correct sequence.
      if (!isEAXAlive) {
-        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
-        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+          .addImm(NumBytes);
+        BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
      } else {
        // Save EAX
-        BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
+        BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
+          .addReg(X86::EAX, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
        // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
        // allocated bytes for EAX.
-        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
-        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+        BuildMI(MBB, MBBI, DL,
+                TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
+        BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
        // Restore EAX
-        MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(X86::MOV32rm),X86::EAX),
+        MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
+                                                X86::EAX),
                                        StackPtr, false, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
@@ -804,6 +897,7 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   MachineBasicBlock::iterator MBBI = prior(MBB.end());
   unsigned RetOpcode = MBBI->getOpcode();
+  DebugLoc DL = MBBI->getDebugLoc();
 
   switch (RetOpcode) {
   case X86::RET:
@@ -836,9 +930,11 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
       NumBytes = FrameSize - CSSize;
 
     // pop EBP.
-    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
-  } else
+    BuildMI(MBB, MBBI, DL,
+            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
+  } else {
     NumBytes = StackSize - CSSize;
+  }
 
   // Skip the callee-saved pop instructions.
   MachineBasicBlock::iterator LastCSPop = MBBI;
@@ -851,6 +947,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
     --MBBI;
   }
 
+  DL = MBBI->getDebugLoc();
+
   // If there is an ADD32ri or SUB32ri of ESP immediately before this
   // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
@@ -867,17 +965,17 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
        MBBI = prior(LastCSPop);
      }
 
-      BuildMI(MBB, MBBI,
+      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);
    } else if (MFI->hasVarSizedObjects()) {
      if (CSSize) {
        unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
-        MachineInstr *MI = addRegOffset(BuildMI(MF, TII.get(Opc), StackPtr),
+        MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                                        FramePtr, false, -CSSize);
        MBB.insert(MBBI, MI);
      } else
-        BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
+        BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
                StackPtr).addReg(FramePtr);
    } else {
 
@@ -890,8 +988,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
-    assert(DestAddr.isRegister() && "Offset should be in register!");
-    BuildMI(MBB, MBBI,
+    assert(DestAddr.isReg() && "Offset should be in register!");
+    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to callee
@@ -900,7 +998,7 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
-    assert( StackAdjust.isImmediate() && "Expecting immediate value.");
+    assert(StackAdjust.isImm() && "Expecting immediate value.");
 
    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
@@ -910,19 +1008,22 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");
+
    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }
+
    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64)
-      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPd)).
+      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
-    else if (RetOpcode== X86::TCRETURNri64) {
-      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
-    } else
-      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+    else if (RetOpcode== X86::TCRETURNri64)
+      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
+    else
+      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+
    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
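
A note on the idiom this patch applies repeatedly: each ADD, SUB, or AND with an immediate that it builds for stack-pointer arithmetic implicitly defines EFLAGS, and because nothing afterwards reads those flags, the patch marks the implicit def dead via getOperand(3).setIsDead(). A minimal sketch of the idiom as a standalone helper, assuming the circa-2009 BuildMI API used in the diff; the helper itself is illustrative and not part of the commit:

    // Build "StackPtr = StackPtr <op> Imm" and mark the dead EFLAGS def.
    // On the X86 reg-imm ALU forms built here, operands 0-2 are the
    // destination, the source register, and the immediate; operand 3 is
    // the implicit EFLAGS def appended from the instruction description.
    static MachineInstr *buildSPArith(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      DebugLoc DL, const TargetInstrInfo &TII,
                                      unsigned Opc, unsigned StackPtr,
                                      int64_t Imm) {
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr).addImm(Imm);
      MI->getOperand(3).setIsDead(); // no later instruction reads the flags
      return MI;
    }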
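
To make the Red Zone logic added to emitPrologue concrete, here is a worked example with assumed numbers (not taken from the commit): in a 64-bit leaf function with no frame pointer, a 16-byte callee-saved area, and StackSize = 160, MinSize is 16 and the clamped size is max(16, 160 - 128) = 32, so the prologue subtracts only 32 bytes from RSP; the remaining 128 bytes of locals live in the red zone below the stack pointer. If StackSize were 96 instead, the result would be max(16, 0) = 16, meaning nothing beyond the callee-saved area is allocated, since all locals fit in the red zone.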
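
Finally, the new getPointerRegClass() hook gives callers a subtarget-correct register class for pointer-width values. A hypothetical use, assuming the MachineRegisterInfo API of the same era (TRI, MF, and the surrounding code are illustrative, not from this commit):

    // Create a virtual register wide enough to hold a pointer.
    const TargetRegisterClass *PtrRC = TRI->getPointerRegClass();
    unsigned AddrReg = MF.getRegInfo().createVirtualRegister(PtrRC);
    // AddrReg belongs to GR64 on 64-bit subtargets and GR32 otherwise,
    // matching the implementation added above.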