diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 4f3a96307cf..253d5c16ffa 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -21,7 +21,7 @@
 #include "llvm/Function.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/SSARegMap.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/CommandLine.h"
@@ -63,24 +63,26 @@ namespace {
 VirtRegMap::VirtRegMap(MachineFunction &mf)
   : TII(*mf.getTarget().getInstrInfo()), MF(mf),
     Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
-    Virt2ReMatIdMap(NO_STACK_SLOT), ReMatMap(NULL),
-    ReMatId(MAX_STACK_SLOT+1) {
+    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
+    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
   grow();
 }
 
 void VirtRegMap::grow() {
-  unsigned LastVirtReg = MF.getSSARegMap()->getLastVirtReg();
+  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
   Virt2PhysMap.grow(LastVirtReg);
   Virt2StackSlotMap.grow(LastVirtReg);
   Virt2ReMatIdMap.grow(LastVirtReg);
+  Virt2SplitMap.grow(LastVirtReg);
+  Virt2SplitKillMap.grow(LastVirtReg);
   ReMatMap.grow(LastVirtReg);
 }
 
 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign stack slot to already spilled register");
-  const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(virtReg);
+  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
   int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                         RC->getAlignment());
   Virt2StackSlotMap[virtReg] = frameIndex;
@@ -89,7 +91,7 @@ int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
 }
 
 void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign stack slot to already spilled register");
   assert((frameIndex >= 0 ||
@@ -99,7 +101,7 @@ void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
 }
 
 int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign re-mat id to already spilled register");
   Virt2ReMatIdMap[virtReg] = ReMatId;
@@ -107,14 +109,14 @@ int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
 }
 
 void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign re-mat id to already spilled register");
   Virt2ReMatIdMap[virtReg] = id;
 }
 
 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
-                            unsigned OpNo, MachineInstr *NewMI) {
+                            MachineInstr *NewMI, ModRef MRInfo) {
   // Move previous memory references folded to new instruction.
   MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
   for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
@@ -123,18 +125,6 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
     MI2VirtMap.erase(I++);
   }
 
-  ModRef MRInfo;
-  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
-  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
-      TID->findTiedToSrcOperand(OpNo) != -1) {
-    // Folded a two-address operand.
-    MRInfo = isModRef;
-  } else if (OldMI->getOperand(OpNo).isDef()) {
-    MRInfo = isMod;
-  } else {
-    MRInfo = isRef;
-  }
-
   // add new memory reference
   MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
 }
@@ -145,18 +135,18 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
 }
 
 void VirtRegMap::print(std::ostream &OS) const {
-  const MRegisterInfo* MRI = MF.getTarget().getRegisterInfo();
+  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();
 
   OS << "********** REGISTER MAP **********\n";
-  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
-         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i) {
+  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
+         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
     if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
-      OS << "[reg" << i << " -> " << MRI->getName(Virt2PhysMap[i]) << "]\n";
+      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i]) << "]\n";
   }
 
-  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
-         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i)
+  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
+         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
     if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
       OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
   OS << '\n';
@@ -183,7 +173,8 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
   DOUT << "********** REWRITE MACHINE CODE **********\n";
   DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
   const TargetMachine &TM = MF.getTarget();
-  const MRegisterInfo &MRI = *TM.getRegisterInfo();
+  const TargetInstrInfo &TII = *TM.getInstrInfo();
+
 
   // LoadedRegs - Keep track of which vregs are loaded, so that we only load
   // each vreg once (in the case where a spilled vreg is used by multiple
@@ -201,32 +192,33 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI.getOperand(i);
       if (MO.isRegister() && MO.getReg())
-        if (MRegisterInfo::isVirtualRegister(MO.getReg())) {
+        if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
          unsigned VirtReg = MO.getReg();
          unsigned PhysReg = VRM.getPhys(VirtReg);
          if (!VRM.isAssignedReg(VirtReg)) {
            int StackSlot = VRM.getStackSlot(VirtReg);
            const TargetRegisterClass* RC =
-              MF.getSSARegMap()->getRegClass(VirtReg);
+              MF.getRegInfo().getRegClass(VirtReg);
 
            if (MO.isUse() &&
                std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                == LoadedRegs.end()) {
-              MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
+              TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
              LoadedRegs.push_back(VirtReg);
              ++NumLoads;
              DOUT << '\t' << *prior(MII);
            }
 
            if (MO.isDef()) {
-              MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
+              TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
+                                      StackSlot, RC);
              ++NumStores;
            }
          }
-          MF.setPhysRegUsed(PhysReg);
+          MF.getRegInfo().setPhysRegUsed(PhysReg);
          MI.getOperand(i).setReg(PhysReg);
        } else {
-          MF.setPhysRegUsed(MO.getReg());
+          MF.getRegInfo().setPhysRegUsed(MO.getReg());
        }
      }
@@ -249,17 +241,18 @@ namespace {
   /// blocks that have low register pressure (the vreg may be spilled due to
   /// register pressure in other blocks).
   class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
-    SSARegMap *RegMap;
-    const MRegisterInfo *MRI;
+    MachineRegisterInfo *RegInfo;
+    const TargetRegisterInfo *TRI;
     const TargetInstrInfo *TII;
   public:
     bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
-      RegMap = MF.getSSARegMap();
-      MRI = MF.getTarget().getRegisterInfo();
+      RegInfo = &MF.getRegInfo();
+      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
-      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!) ****\n";
+      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
+              " ****\n";
      DEBUG(MF.dump());
 
      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
@@ -278,6 +271,16 @@ namespace {
                             AvailableSpills &Spills, BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
+    void SpillRegToStackSlot(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MII,
+                             int Idx, unsigned PhysReg, int StackSlot,
+                             const TargetRegisterClass *RC,
+                             bool isAvailable, MachineInstr *&LastStore,
+                             AvailableSpills &Spills,
+                             SmallSet<MachineInstr*, 4> &ReMatDefs,
+                             BitVector &RegKills,
+                             std::vector<MachineOperand*> &KillOps,
+                             VirtRegMap &VRM);
     void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
   };
 }
@@ -295,7 +298,7 @@
 /// this bit and addAvailable sets it if.
 namespace {
 class VISIBILITY_HIDDEN AvailableSpills {
-  const MRegisterInfo *MRI;
+  const TargetRegisterInfo *TRI;
   const TargetInstrInfo *TII;
 
   // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
@@ -313,11 +316,11 @@ class VISIBILITY_HIDDEN AvailableSpills {
   void ClobberPhysRegOnly(unsigned PhysReg);
 public:
-  AvailableSpills(const MRegisterInfo *mri, const TargetInstrInfo *tii)
-    : MRI(mri), TII(tii) {
+  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
+    : TRI(tri), TII(tii) {
   }
 
-  const MRegisterInfo *getRegInfo() const { return MRI; }
+  const TargetRegisterInfo *getRegInfo() const { return TRI; }
 
   /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
   /// available in a physical register, return that PhysReg, otherwise
@@ -347,7 +350,7 @@ public:
       DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
     else
       DOUT << "Remembering SS#" << SlotOrReMat;
-    DOUT << " in physreg " << MRI->getName(Reg) << "\n";
+    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
   }
 
   /// canClobberPhysReg - Return true if the spiller is allowed to change the
@@ -358,7 +361,7 @@ public:
           "Value not available!");
     return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
   }
-  
+
   /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
   /// stackslot register. The register is still available but is no longer
   /// allowed to be modifed.
@@ -388,7 +391,7 @@ void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
     SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
-    DOUT << "PhysReg " << MRI->getName(PhysReg)
+    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
   }
 }
@@ -397,7 +400,7 @@ void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
 /// stackslot register and its aliases. The register and its aliases may
 /// still available but is no longer allowed to be modifed.
 void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
-  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
+  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
     disallowClobberPhysRegOnly(*AS);
   disallowClobberPhysRegOnly(PhysReg);
 }
@@ -413,7 +416,7 @@ void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
     SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
-    DOUT << "PhysReg " << MRI->getName(PhysReg)
+    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
     if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
       DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
@@ -426,7 +429,7 @@
 /// value. We use this to invalidate any info about stuff we thing lives in
 /// it and any of its aliases.
 void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
-  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
+  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
     ClobberPhysRegOnly(*AS);
   ClobberPhysRegOnly(PhysReg);
 }
@@ -473,6 +476,17 @@ static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
   }
 }
 
+/// InvalidateKill - A MI that defines the specified register is being deleted,
+/// invalidate the register kill information.
+static void InvalidateKill(unsigned Reg, BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps) {
+  if (RegKills[Reg]) {
+    KillOps[Reg]->setIsKill(false);
+    KillOps[Reg] = NULL;
+    RegKills.reset(Reg);
+  }
+}
+
 /// InvalidateRegDef - If the def operand of the specified def MI is now dead
 /// (since it's spill instruction is removed), mark it isDead. Also checks if
 /// the def MI has other definition operands that are not dead. Returns it by
@@ -523,7 +537,7 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
 /// over.
 static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps) {
-  const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
+  const TargetInstrDesc &TID = MI.getDesc();
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
     if (!MO.isRegister() || !MO.isUse())
@@ -535,13 +549,14 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
     if (RegKills[Reg]) {
       // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
-      KillOps[Reg]->unsetIsKill();
-      if (i < TID->numOperands &&
-          TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
+      KillOps[Reg]->setIsKill(false);
+      KillOps[Reg] = NULL;
+      RegKills.reset(Reg);
+      if (i < TID.getNumOperands() &&
+          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
     }
-
     if (MO.isKill()) {
       RegKills.set(Reg);
       KillOps[Reg] = &MO;
@@ -591,8 +606,8 @@ namespace {
     std::vector<ReusedOp> Reuses;
     BitVector PhysRegsClobbered;
   public:
-    ReuseInfo(MachineInstr &mi, const MRegisterInfo *mri) : MI(mi) {
-      PhysRegsClobbered.resize(mri->getNumRegs());
+    ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
+      PhysRegsClobbered.resize(tri->getNumRegs());
     }
 
     bool hasReuses() const {
@@ -631,6 +646,9 @@ namespace {
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
+      const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
+                                     .getInstrInfo();
+
      if (Reuses.empty()) return PhysReg;  // This is most often empty.
 
      for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
@@ -652,14 +670,14 @@ namespace {
          // value aliases the new register. If so, codegen the previous reload
          // and use this one.
          unsigned PRRU = Op.PhysRegReused;
-          const MRegisterInfo *MRI = Spills.getRegInfo();
-          if (MRI->areAliases(PRRU, PhysReg)) {
+          const TargetRegisterInfo *TRI = Spills.getRegInfo();
+          if (TRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used. This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
-              MBB->getParent()->getSSARegMap()->getRegClass(Op.VirtReg);
+              MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
 
            // Copy Op out of the vector and remove it, we're going to insert an
            // explicit load for it.
@@ -675,11 +693,11 @@ namespace {
                                                  Rejected, RegKills, KillOps, VRM);
 
            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
-              MRI->reMaterialize(*MBB, MI, NewPhysReg,
+              TRI->reMaterialize(*MBB, MI, NewPhysReg,
                                 VRM.getReMaterializedMI(NewOp.VirtReg));
              ++NumReMats;
            } else {
-              MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
+              TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
@@ -775,7 +793,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
           DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
         continue;
       UnfoldPR = PhysReg;
-      UnfoldedOpc = MRI->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
+      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                     false, true);
     }
   }
@@ -788,24 +806,23 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
     if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
       continue;
     unsigned VirtReg = MO.getReg();
-    if (MRegisterInfo::isPhysicalRegister(VirtReg) ||
-        RegMap->isSubRegister(VirtReg))
+    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
       continue;
     if (VRM.isAssignedReg(VirtReg)) {
       unsigned PhysReg = VRM.getPhys(VirtReg);
-      if (PhysReg && MRI->regsOverlap(PhysReg, UnfoldPR))
+      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
         return false;
     } else if (VRM.isReMaterialized(VirtReg))
       continue;
     int SS = VRM.getStackSlot(VirtReg);
     unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
     if (PhysReg) {
-      if (MRI->regsOverlap(PhysReg, UnfoldPR))
+      if (TRI->regsOverlap(PhysReg, UnfoldPR))
         return false;
       continue;
     }
     PhysReg = VRM.getPhys(VirtReg);
-    if (!MRI->regsOverlap(PhysReg, UnfoldPR))
+    if (!TRI->regsOverlap(PhysReg, UnfoldPR))
       continue;
 
     // Ok, we'll need to reload the value into a register which makes
@@ -814,21 +831,21 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
     // unfolded. This allows us to perform the store unfolding
     // optimization.
     SmallVector<MachineInstr*, 4> NewMIs;
-    if (MRI->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
+    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
       assert(NewMIs.size() == 1);
       MachineInstr *NewMI = NewMIs.back();
       NewMIs.clear();
-      unsigned Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
-      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
+      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
+      assert(Idx != -1);
+      SmallVector<unsigned, 4> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
       if (FoldedMI) {
-        if (VRM.hasPhys(UnfoldVR))
-          assert(VRM.getPhys(UnfoldVR) == UnfoldPR);
-        else
+        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
-
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
-        VRM.RemoveFromFoldedVirtMap(&MI);
+        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        return true;
      }
@@ -838,16 +855,87 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
   return false;
 }
 
+/// findSuperReg - Find the SubReg's super-register of given register class
+/// where its SubIdx sub-register is SubReg.
+static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
+                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
+  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+       I != E; ++I) {
+    unsigned Reg = *I;
+    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
+      return Reg;
+  }
+  return 0;
+}
+
+/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
+/// the last store to the same slot is now dead. If so, remove the last store.
+void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator &MII,
+                                  int Idx, unsigned PhysReg, int StackSlot,
+                                  const TargetRegisterClass *RC,
+                                  bool isAvailable, MachineInstr *&LastStore,
+                                  AvailableSpills &Spills,
+                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
+                                  BitVector &RegKills,
+                                  std::vector<MachineOperand*> &KillOps,
+                                  VirtRegMap &VRM) {
+  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
+  DOUT << "Store:\t" << *next(MII);
+
+  // If there is a dead store to this stack slot, nuke it now.
+  if (LastStore) {
+    DOUT << "Removed dead store:\t" << *LastStore;
+    ++NumDSE;
+    SmallVector<unsigned, 2> KillRegs;
+    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
+    MachineBasicBlock::iterator PrevMII = LastStore;
+    bool CheckDef = PrevMII != MBB.begin();
+    if (CheckDef)
+      --PrevMII;
+    MBB.erase(LastStore);
+    VRM.RemoveMachineInstrFromMaps(LastStore);
+    if (CheckDef) {
+      // Look at defs of killed registers on the store. Mark the defs
+      // as dead since the store has been deleted and they aren't
+      // being reused.
+      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
+        bool HasOtherDef = false;
+        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
+          MachineInstr *DeadDef = PrevMII;
+          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
+            // FIXME: This assumes a remat def does not have side
+            // effects.
+            MBB.erase(DeadDef);
+            VRM.RemoveMachineInstrFromMaps(DeadDef);
+            ++NumDRM;
+          }
+        }
+      }
+    }
+  }
+
+  LastStore = next(MII);
+
+  // If the stack slot value was previously available in some other
+  // register, change it now. Otherwise, make the register available,
+  // in PhysReg.
+  Spills.ModifyStackSlotOrReMat(StackSlot);
+  Spills.ClobberPhysReg(PhysReg);
+  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
+  ++NumStores;
+}
+
 /// rewriteMBB - Keep track of which spills are available even after the
-/// register allocator is done with them.  If possible, avoid reloading vregs.
+/// register allocator is done with them. If possible, avoid reloading vregs.
 void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
   DOUT << MBB.getBasicBlock()->getName() << ":\n";
 
   MachineFunction &MF = *MBB.getParent();
-  
+
   // Spills - Keep track of which spilled values are available in physregs so
   // that we can choose to reuse the physregs instead of emitting reloads.
-  AvailableSpills Spills(MRI, TII);
+  AvailableSpills Spills(TRI, TII);
 
   // MaybeDeadStores - When we need to write a value back into a stack slot,
   // keep track of the inserted store. If the stack slot value is never read
   // (because the value was used from some available register, for example), and
   // subsequently stored to, the original store is dead. This map keeps track
@@ -862,9 +950,9 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
   SmallSet<MachineInstr*, 4> ReMatDefs;
 
   // Keep track of kill information.
-  BitVector RegKills(MRI->getNumRegs());
+  BitVector RegKills(TRI->getNumRegs());
   std::vector<MachineOperand*> KillOps;
-  KillOps.resize(MRI->getNumRegs(), NULL);
+  KillOps.resize(TRI->getNumRegs(), NULL);
 
   for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
        MII != E; ) {
@@ -877,13 +965,58 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
                           MaybeDeadStores, Spills, RegKills, KillOps, VRM))
       NextMII = next(MII);
 
-    /// ReusedOperands - Keep track of operand reuse in case we need to undo
-    /// reuse.
     MachineInstr &MI = *MII;
-    ReuseInfo ReusedOperands(MI, MRI);
+    const TargetInstrDesc &TID = MI.getDesc();
+
+    // Insert restores here if asked to.
+    if (VRM.isRestorePt(&MI)) {
+      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
+      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
+        unsigned VirtReg = RestoreRegs[i];
+        if (!VRM.getPreSplitReg(VirtReg))
+          continue; // Split interval spilled again.
+        unsigned Phys = VRM.getPhys(VirtReg);
+        RegInfo->setPhysRegUsed(Phys);
+        if (VRM.isReMaterialized(VirtReg)) {
+          TRI->reMaterialize(MBB, &MI, Phys,
+                             VRM.getReMaterializedMI(VirtReg));
+          ++NumReMats;
+        } else {
+          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+          TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
+                                    RC);
+          ++NumLoads;
+        }
+        // This invalidates Phys.
+        Spills.ClobberPhysReg(Phys);
+        UpdateKills(*prior(MII), RegKills, KillOps);
+        DOUT << '\t' << *prior(MII);
+      }
+    }
 
-    const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
+    // Insert spills here if asked to.
+    if (VRM.isSpillPt(&MI)) {
+      std::vector<std::pair<unsigned,bool> > &SpillRegs =
+        VRM.getSpillPtSpills(&MI);
+      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
+        unsigned VirtReg = SpillRegs[i].first;
+        bool isKill = SpillRegs[i].second;
+        if (!VRM.getPreSplitReg(VirtReg))
+          continue; // Split interval spilled again.
+        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+        unsigned Phys = VRM.getPhys(VirtReg);
+        int StackSlot = VRM.getStackSlot(VirtReg);
+        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
+        MachineInstr *StoreMI = next(MII);
+        DOUT << "Store:\t" << StoreMI;
+        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+      }
+      NextMII = next(MII);
+    }
 
+    /// ReusedOperands - Keep track of operand reuse in case we need to undo
+    /// reuse.
+    ReuseInfo ReusedOperands(MI, TRI);
     // Process all of the spilled uses and all non spilled reg references.
     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI.getOperand(i);
@@ -891,30 +1024,24 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        continue;   // Ignore non-register operands.
 
      unsigned VirtReg = MO.getReg();
-      if (MRegisterInfo::isPhysicalRegister(VirtReg)) {
+      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
-        MF.setPhysRegUsed(VirtReg);
+        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }
 
-      assert(MRegisterInfo::isVirtualRegister(VirtReg) &&
+      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual or a physical register?");
-
-      unsigned SubIdx = 0;
-      bool isSubReg = RegMap->isSubRegister(VirtReg);
-      if (isSubReg) {
-        SubIdx = RegMap->getSubRegisterIndex(VirtReg);
-        VirtReg = RegMap->getSuperRegister(VirtReg);
-      }
+      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
-        MF.setPhysRegUsed(Phys);
+        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
-        unsigned RReg = isSubReg ? MRI->getSubReg(Phys, SubIdx) : Phys;
+        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        continue;
      }
@@ -930,14 +1057,6 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 
      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
-      if (!PhysReg && DoReMat) {
-        // This use is rematerializable. But perhaps the value is available in
-        // a register if the definition is not deleted. If so, check if we can
-        // reuse the value.
-        ReuseSlot = VRM.getStackSlot(VirtReg);
-        if (ReuseSlot != VirtRegMap::NO_STACK_SLOT)
-          PhysReg = Spills.getSpillSlotOrReMatPhysReg(ReuseSlot);
-      }
 
      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
@@ -950,8 +1069,8 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
-          (isSubReg || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
-        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
+          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
+        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }
@@ -963,7 +1082,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        // aren't allowed to modify the reused register. If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
-        int ti = TID->getOperandConstraint(i, TOI::TIED_TO);
+        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
@@ -981,10 +1100,10 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
-               << MRI->getName(PhysReg) << " for vreg"
+               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
-               << MRI->getName(VRM.getPhys(VirtReg)) << "\n";
-          unsigned RReg = isSubReg ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
+          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
 
          // The only technical detail we have is that we don't know that
@@ -1016,7 +1135,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
-              VRM.RemoveFromFoldedVirtMap(DeadStore);
+              VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
@@ -1054,20 +1173,20 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
-          DOUT << " from physreg " << MRI->getName(PhysReg) << " for vreg"
+          DOUT << " from physreg " << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into same physreg.\n";
-          unsigned RReg = isSubReg ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
-          ReusedOperands.markClobbered(PhysReg);
+          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }
 
-        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
-        MF.setPhysRegUsed(DesignatedReg);
+        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
-        MRI->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
+        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
 
        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);
@@ -1077,7 +1196,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
-          isSubReg ? MRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
+          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
@@ -1096,14 +1215,14 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, Spills,
                                 MaybeDeadStores, RegKills, KillOps, VRM);
 
-      MF.setPhysRegUsed(PhysReg);
+      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
-        MRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
+        TRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
        ++NumReMats;
      } else {
-        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
-        MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
+        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        ++NumLoads;
      }
      // This invalidates PhysReg.
@@ -1115,9 +1234,9 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
-      if (TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
+      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
-      unsigned RReg = isSubReg ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
@@ -1125,6 +1244,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 
    DOUT << '\t' << MI;
 
+
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register
@@ -1133,11 +1253,10 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
-      if (VRM.isAssignedReg(VirtReg)) {
-        DOUT << ": No stack slot!\n";
-        continue;
-      }
+
      int SS = VRM.getStackSlot(VirtReg);
+      if (SS == VirtRegMap::NO_STACK_SLOT)
+        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
 
@@ -1152,18 +1271,21 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
          DOUT << "Promoted Load To Copy: " << MI;
          if (DestReg != InReg) {
-            const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
-            MRI->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
+            const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+            TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
            // Revisit the copy so we make sure to notice the effects of the
            // operation on the destreg (either needing to RA it if it's
            // virtual or needing to clobber any values if it's physical).
            NextMII = &MI;
            --NextMII;  // backtrack to the copy.
            BackTracked = true;
-          } else
+          } else {
            DOUT << "Removing now-noop copy: " << MI;
+            // Unset last kill since it's being reused.
+            InvalidateKill(InReg, RegKills, KillOps);
+          }
 
-          VRM.RemoveFromFoldedVirtMap(&MI);
+          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          goto ProcessNextInst;
@@ -1172,9 +1294,9 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
        SmallVector<MachineInstr*, 4> NewMIs;
        if (PhysReg &&
-            MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
+            TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
          MBB.insert(MII, NewMIs[0]);
-          VRM.RemoveFromFoldedVirtMap(&MI);
+          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          --NextMII;  // backtrack to the unfolded instruction.
@@ -1190,16 +1312,20 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
-        if (MR & VirtRegMap::isMod) {
+        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
+          // We can reuse this physreg as long as we are allowed to clobber
+          // the value and there isn't an earlier def that has already clobbered
+          // the physreg.
          if (PhysReg &&
+              !TII->isStoreToStackSlot(&MI, SS) && // Not profitable!
              DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
-              MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
+              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
            MBB.insert(MII, NewStore);
-            VRM.RemoveFromFoldedVirtMap(&MI);
+            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;
@@ -1213,7 +1339,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
-          VRM.RemoveFromFoldedVirtMap(DeadStore);
+          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
@@ -1241,7 +1367,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      int StackSlot;
      if (!(MR & VirtRegMap::isRef)) {
        if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
-          assert(MRegisterInfo::isPhysicalRegister(SrcReg) &&
+          assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                 "Src hasn't been allocated yet?");
          // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
          // this as a potentially dead store in case there is a subsequent
@@ -1264,7 +1390,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        continue;
 
      unsigned VirtReg = MO.getReg();
-      if (!MRegisterInfo::isVirtualRegister(VirtReg)) {
+      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst;
@@ -1273,7 +1399,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
          DOUT << "Removing now-noop copy: " << MI;
          MBB.erase(&MI);
          Erased = true;
-          VRM.RemoveFromFoldedVirtMap(&MI);
+          VRM.RemoveMachineInstrFromMaps(&MI);
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }
@@ -1298,21 +1424,28 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        continue;
      }
 
+      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);
 
      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
-      const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
+      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
 
      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
-      int TiedOp = MI.getInstrDescriptor()->findTiedToSrcOperand(i);
-      if (TiedOp != -1)
+      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
+      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
-      else {
+        if (SubIdx) {
+          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
+          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
+                 "Can't find corresponding super-register!");
+          PhysReg = SuperReg;
+        }
+      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
@@ -1322,54 +1455,16 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        }
      }
 
-      MF.setPhysRegUsed(PhysReg);
-      ReusedOperands.markClobbered(PhysReg);
-      MI.getOperand(i).setReg(PhysReg);
-      if (!MO.isDead()) {
-        MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
-        DOUT << "Store:\t" << *next(MII);
+      RegInfo->setPhysRegUsed(PhysReg);
+      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+      ReusedOperands.markClobbered(RReg);
+      MI.getOperand(i).setReg(RReg);
 
-        // If there is a dead store to this stack slot, nuke it now.
+      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
-        if (LastStore) {
-          DOUT << "Removed dead store:\t" << *LastStore;
-          ++NumDSE;
-          SmallVector<unsigned, 2> KillRegs;
-          InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
-          MachineBasicBlock::iterator PrevMII = LastStore;
-          bool CheckDef = PrevMII != MBB.begin();
-          if (CheckDef)
-            --PrevMII;
-          MBB.erase(LastStore);
-          VRM.RemoveFromFoldedVirtMap(LastStore);
-          if (CheckDef) {
-            // Look at defs of killed registers on the store. Mark the defs
-            // as dead since the store has been deleted and they aren't
-            // being reused.
-            for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
-              bool HasOtherDef = false;
-              if (InvalidateRegDef(PrevMII, MI, KillRegs[j], HasOtherDef)) {
-                MachineInstr *DeadDef = PrevMII;
-                if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
-                  // FIXME: This assumes a remat def does not have side
-                  // effects.
-                  MBB.erase(DeadDef);
-                  VRM.RemoveFromFoldedVirtMap(DeadDef);
-                  ++NumDRM;
-                }
-              }
-            }
-          }
-        }
-        LastStore = next(MII);
-
-        // If the stack slot value was previously available in some other
-        // register, change it now. Otherwise, make the register available,
-        // in PhysReg.
-        Spills.ModifyStackSlotOrReMat(StackSlot);
-        Spills.ClobberPhysReg(PhysReg);
-        Spills.addAvailable(StackSlot, LastStore, PhysReg);
-        ++NumStores;
+        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
+                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
+        NextMII = next(MII);
 
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
@@ -1380,7 +1475,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
          DOUT << "Removing now-noop copy: " << MI;
          MBB.erase(&MI);
          Erased = true;
-          VRM.RemoveFromFoldedVirtMap(&MI);
+          VRM.RemoveMachineInstrFromMaps(&MI);
          UpdateKills(*LastStore, RegKills, KillOps);
          goto ProcessNextInst;
        }
@@ -1388,14 +1483,14 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      }
    }
  ProcessNextInst:
-    if (!Erased && !BackTracked)
+    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
+    }
    MII = NextMII;
  }
 }
 
-
 llvm::Spiller* llvm::createSpiller() {
   switch (SpillerOpt) {
   default: assert(0 && "Unreachable!");
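Note on the recurring rewrite in this patch: nearly every mechanical change above follows one migration pattern. SSARegMap gives way to MachineRegisterInfo (MF.getSSARegMap() becomes MF.getRegInfo(), which also takes over setPhysRegUsed), MRegisterInfo is renamed TargetRegisterInfo, and the spill/reload/copy helpers (storeRegToStackSlot, loadRegFromStackSlot, copyRegToReg, foldMemoryOperand, unfoldMemoryOperand) move from the register-info object to TargetInstrInfo, with storeRegToStackSlot gaining an explicit isKill argument. A minimal before/after sketch of a rewritten spill site, using only the signatures exercised in this patch; the surrounding TM, MF, MBB, MII, PhysReg, StackSlot, and VirtReg are assumed to be set up as in SimpleSpiller::runOnMachineFunction above, so this illustrates the calling convention rather than being buildable on its own:

    // Before this patch: spill helpers lived on MRegisterInfo and the
    // register class came from SSARegMap.
    //   const MRegisterInfo &MRI = *TM.getRegisterInfo();
    //   const TargetRegisterClass *RC = MF.getSSARegMap()->getRegClass(VirtReg);
    //   MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
    //   MF.setPhysRegUsed(PhysReg);

    // After this patch: the same spill goes through TargetInstrInfo, the
    // register class comes from MachineRegisterInfo, the store states whether
    // PhysReg is killed by it, and physreg-use bookkeeping moves as well.
    const TargetInstrInfo &TII = *TM.getInstrInfo();
    const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(VirtReg);
    TII.storeRegToStackSlot(MBB, next(MII), PhysReg, /*isKill=*/true,
                            StackSlot, RC);
    MF.getRegInfo().setPhysRegUsed(PhysReg);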