diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 03a5cfde5bd..253d5c16ffa 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -21,7 +21,7 @@
 #include "llvm/Function.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/SSARegMap.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/CommandLine.h"
@@ -63,24 +63,26 @@ namespace {
 VirtRegMap::VirtRegMap(MachineFunction &mf)
   : TII(*mf.getTarget().getInstrInfo()), MF(mf),
     Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
-    Virt2ReMatIdMap(NO_STACK_SLOT), ReMatMap(NULL),
-    ReMatId(MAX_STACK_SLOT+1) {
+    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
+    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
   grow();
 }
 
 void VirtRegMap::grow() {
-  unsigned LastVirtReg = MF.getSSARegMap()->getLastVirtReg();
+  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
   Virt2PhysMap.grow(LastVirtReg);
   Virt2StackSlotMap.grow(LastVirtReg);
   Virt2ReMatIdMap.grow(LastVirtReg);
+  Virt2SplitMap.grow(LastVirtReg);
+  Virt2SplitKillMap.grow(LastVirtReg);
   ReMatMap.grow(LastVirtReg);
 }
 
 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign stack slot to already spilled register");
-  const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(virtReg);
+  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
   int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                         RC->getAlignment());
   Virt2StackSlotMap[virtReg] = frameIndex;
@@ -89,7 +91,7 @@ int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
 }
 
 void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign stack slot to already spilled register");
   assert((frameIndex >= 0 ||
@@ -99,7 +101,7 @@ void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
   Virt2StackSlotMap[virtReg] = frameIndex;
 }
 
 int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign re-mat id to already spilled register");
   Virt2ReMatIdMap[virtReg] = ReMatId;
@@ -107,14 +109,14 @@ int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
 }
 
 void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
-  assert(MRegisterInfo::isVirtualRegister(virtReg));
+  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
          "attempt to assign re-mat id to already spilled register");
   Virt2ReMatIdMap[virtReg] = id;
 }
 
 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
-                            unsigned OpNo, MachineInstr *NewMI) {
+                            MachineInstr *NewMI, ModRef MRInfo) {
   // Move previous memory references folded to new instruction.
   MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
   for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
@@ -123,35 +125,28 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
     MI2VirtMap.erase(I++);
   }
 
-  ModRef MRInfo;
-  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
-  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
-      TID->findTiedToSrcOperand(OpNo) != -1) {
-    // Folded a two-address operand.
-    MRInfo = isModRef;
-  } else if (OldMI->getOperand(OpNo).isDef()) {
-    MRInfo = isMod;
-  } else {
-    MRInfo = isRef;
-  }
-
   // add new memory reference
   MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
 }
 
+void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
+  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
+  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
+}
+
 void VirtRegMap::print(std::ostream &OS) const {
-  const MRegisterInfo* MRI = MF.getTarget().getRegisterInfo();
+  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();
 
   OS << "********** REGISTER MAP **********\n";
-  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
-       e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i) {
+  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
+       e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
     if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
-      OS << "[reg" << i << " -> " << MRI->getName(Virt2PhysMap[i]) << "]\n";
+      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i]) << "]\n";
   }
 
-  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
-       e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i)
+  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
+       e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
     if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
       OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
   OS << '\n';
@@ -178,7 +173,8 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
   DOUT << "********** REWRITE MACHINE CODE **********\n";
   DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
   const TargetMachine &TM = MF.getTarget();
-  const MRegisterInfo &MRI = *TM.getRegisterInfo();
+  const TargetInstrInfo &TII = *TM.getInstrInfo();
+
 
   // LoadedRegs - Keep track of which vregs are loaded, so that we only load
   // each vreg once (in the case where a spilled vreg is used by multiple
@@ -196,32 +192,33 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI.getOperand(i);
       if (MO.isRegister() && MO.getReg())
-        if (MRegisterInfo::isVirtualRegister(MO.getReg())) {
+        if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
           unsigned VirtReg = MO.getReg();
           unsigned PhysReg = VRM.getPhys(VirtReg);
           if (!VRM.isAssignedReg(VirtReg)) {
            int StackSlot = VRM.getStackSlot(VirtReg);
            const TargetRegisterClass* RC =
-             MF.getSSARegMap()->getRegClass(VirtReg);
+             MF.getRegInfo().getRegClass(VirtReg);
 
            if (MO.isUse() &&
                std::find(LoadedRegs.begin(), LoadedRegs.end(),
                          VirtReg) == LoadedRegs.end()) {
-             MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
+             TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
              LoadedRegs.push_back(VirtReg);
              ++NumLoads;
              DOUT << '\t' << *prior(MII);
            }
 
            if (MO.isDef()) {
-             MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
+             TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
+                                     StackSlot, RC);
              ++NumStores;
            }
           }
-          MF.setPhysRegUsed(PhysReg);
+          MF.getRegInfo().setPhysRegUsed(PhysReg);
           MI.getOperand(i).setReg(PhysReg);
         } else {
-          MF.setPhysRegUsed(MO.getReg());
+          MF.getRegInfo().setPhysRegUsed(MO.getReg());
         }
     }
@@ -237,26 +234,53 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
 //===----------------------------------------------------------------------===//
 
 namespace {
+  class AvailableSpills;
+
   /// LocalSpiller - This spiller does a simple pass over the machine basic
   /// block to attempt to keep spills in registers as much as possible for
   /// blocks that have low register pressure (the vreg may be spilled due to
   /// register pressure in other blocks).
   class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
-    const MRegisterInfo *MRI;
+    MachineRegisterInfo *RegInfo;
+    const TargetRegisterInfo *TRI;
     const TargetInstrInfo *TII;
   public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
-      MRI = MF.getTarget().getRegisterInfo();
+      RegInfo = &MF.getRegInfo();
+      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
+      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
+              " ****\n";
+      DEBUG(MF.dump());
 
      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);
+
+      DOUT << "**** Post Machine Instrs ****\n";
+      DEBUG(MF.dump());
+
      return true;
    }
   private:
+    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator &MII,
+                           std::vector<MachineInstr*> &MaybeDeadStores,
+                           AvailableSpills &Spills, BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps,
+                           VirtRegMap &VRM);
+    void SpillRegToStackSlot(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MII,
+                             int Idx, unsigned PhysReg, int StackSlot,
+                             const TargetRegisterClass *RC,
+                             bool isAvailable, MachineInstr *&LastStore,
+                             AvailableSpills &Spills,
+                             SmallSet<MachineInstr*, 4> &ReMatDefs,
+                             BitVector &RegKills,
+                             std::vector<MachineOperand*> &KillOps,
+                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
   };
 }
@@ -274,7 +298,7 @@ namespace {
 /// this bit and addAvailable sets it if.
 namespace {
 class VISIBILITY_HIDDEN AvailableSpills {
-  const MRegisterInfo *MRI;
+  const TargetRegisterInfo *TRI;
   const TargetInstrInfo *TII;
 
   // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
@@ -292,11 +316,11 @@ class VISIBILITY_HIDDEN AvailableSpills {
   void ClobberPhysRegOnly(unsigned PhysReg);
 public:
-  AvailableSpills(const MRegisterInfo *mri, const TargetInstrInfo *tii)
-    : MRI(mri), TII(tii) {
+  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
+    : TRI(tri), TII(tii) {
   }
 
-  const MRegisterInfo *getRegInfo() const { return MRI; }
+  const TargetRegisterInfo *getRegInfo() const { return TRI; }
 
   /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
   /// available in a physical register, return that PhysReg, otherwise
   /// return 0.
@@ -326,7 +350,7 @@ public:
       DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
     else
       DOUT << "Remembering SS#" << SlotOrReMat;
-    DOUT << " in physreg " << MRI->getName(Reg) << "\n";
+    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
   }
 
   /// canClobberPhysReg - Return true if the spiller is allowed to change the
   /// value of the specified stackslot register if it desires. The specified
   /// stack slot must be available in a physreg for this query to make sense.
   bool canClobberPhysReg(int SlotOrReMat) const {
     assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
            "Value not available!");
     return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
   }
-  
+
   /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
   /// stackslot register. The register is still available but is no longer
   /// allowed to be modifed.
   void disallowClobberPhysReg(unsigned PhysReg);
 
   /// ClobberPhysReg - This is called when the specified physreg changes
-  /// value. We use this to invalidate any info about stuff we thing lives in
+  /// value. We use this to invalidate any info about stuff that lives in
   /// it and any of its aliases.
   void ClobberPhysReg(unsigned PhysReg);
 
@@ -367,7 +391,7 @@ void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
            "Bidirectional map mismatch!");
     SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
-    DOUT << "PhysReg " << MRI->getName(PhysReg)
+    DOUT << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n";
   }
 }
@@ -376,7 +400,7 @@ void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
 /// stackslot register and its aliases. The register and its aliases may
 /// still available but is no longer allowed to be modifed.
 void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
-  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
+  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
     disallowClobberPhysRegOnly(*AS);
   disallowClobberPhysRegOnly(PhysReg);
 }
@@ -392,7 +416,7 @@ void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
            "Bidirectional map mismatch!");
     SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
-    DOUT << "PhysReg " << MRI->getName(PhysReg)
+    DOUT << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ";
     if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
       DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
@@ -405,7 +429,7 @@
 /// value. We use this to invalidate any info about stuff we thing lives in
 /// it and any of its aliases.
 void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
-  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
+  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
     ClobberPhysRegOnly(*AS);
   ClobberPhysRegOnly(PhysReg);
 }
@@ -437,10 +461,10 @@ void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
 /// marked kill, then invalidate the information.
 static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
-                            SmallVector<unsigned, 2> *KillRegs = NULL) {
+                            SmallVector<unsigned, 2> *KillRegs = NULL) {
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
+    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
       continue;
     unsigned Reg = MO.getReg();
     if (KillRegs)
@@ -452,6 +476,17 @@ static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
   }
 }
 
+/// InvalidateKill - A MI that defines the specified register is being deleted,
+/// invalidate the register kill information.
+static void InvalidateKill(unsigned Reg, BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps) {
+  if (RegKills[Reg]) {
+    KillOps[Reg]->setIsKill(false);
+    KillOps[Reg] = NULL;
+    RegKills.reset(Reg);
+  }
+}
+
 /// InvalidateRegDef - If the def operand of the specified def MI is now dead
 /// (since it's spill instruction is removed), mark it isDead. Also checks if
 /// the def MI has other definition operands that are not dead. Returns it by
@@ -465,7 +500,7 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
   MachineOperand *DefOp = NULL;
   for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = DefMI->getOperand(i);
-    if (MO.isReg() && MO.isDef()) {
+    if (MO.isRegister() && MO.isDef()) {
       if (MO.getReg() == Reg)
         DefOp = &MO;
       else if (!MO.isDead())
@@ -482,7 +517,7 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
       MachineInstr *NMI = I;
       for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
         MachineOperand &MO = NMI->getOperand(j);
-        if (!MO.isReg() || MO.getReg() != Reg)
+        if (!MO.isRegister() || MO.getReg() != Reg)
           continue;
         if (MO.isUse())
           FoundUse = true;
@@ -502,10 +537,10 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
 /// over.
 static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps) {
-  const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
+  const TargetInstrDesc &TID = MI.getDesc();
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isReg() || !MO.isUse())
+    if (!MO.isRegister() || !MO.isUse())
       continue;
     unsigned Reg = MO.getReg();
     if (Reg == 0)
@@ -514,13 +549,14 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
     if (RegKills[Reg]) {
       // That can't be right. Register is killed but not re-defined and it's
       // being reused. Let's fix that.
-      KillOps[Reg]->unsetIsKill();
-      if (i < TID->numOperands &&
-          TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
+      KillOps[Reg]->setIsKill(false);
+      KillOps[Reg] = NULL;
+      RegKills.reset(Reg);
+      if (i < TID.getNumOperands() &&
+          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
         // Unless it's a two-address operand, this is the new kill.
         MO.setIsKill();
     }
-
     if (MO.isKill()) {
       RegKills.set(Reg);
       KillOps[Reg] = &MO;
@@ -529,7 +565,7 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
 
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isReg() || !MO.isDef())
+    if (!MO.isRegister() || !MO.isDef())
       continue;
     unsigned Reg = MO.getReg();
     RegKills.reset(Reg);
@@ -570,8 +606,8 @@ namespace {
     std::vector<ReusedOp> Reuses;
     BitVector PhysRegsClobbered;
   public:
-    ReuseInfo(MachineInstr &mi, const MRegisterInfo *mri) : MI(mi) {
-      PhysRegsClobbered.resize(mri->getNumRegs());
+    ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
+      PhysRegsClobbered.resize(tri->getNumRegs());
     }
 
     bool hasReuses() const {
@@ -610,6 +646,9 @@ namespace {
                              BitVector &RegKills,
                              std::vector<MachineOperand*> &KillOps,
                              VirtRegMap &VRM) {
+      const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
+                                     .getInstrInfo();
+
       if (Reuses.empty()) return PhysReg;  // This is most often empty.
 
       for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
@@ -631,14 +670,14 @@ namespace {
           // value aliases the new register. If so, codegen the previous reload
           // and use this one.
           unsigned PRRU = Op.PhysRegReused;
-          const MRegisterInfo *MRI = Spills.getRegInfo();
-          if (MRI->areAliases(PRRU, PhysReg)) {
+          const TargetRegisterInfo *TRI = Spills.getRegInfo();
+          if (TRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used. This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
-             MBB->getParent()->getSSARegMap()->getRegClass(Op.VirtReg);
+             MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
 
            // Copy Op out of the vector and remove it, we're going to insert an
            // explicit load for it.
@@ -654,11 +693,11 @@ namespace {
                                            Rejected, RegKills, KillOps, VRM);
 
            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
-             MRI->reMaterialize(*MBB, MI, NewPhysReg,
+             TRI->reMaterialize(*MBB, MI, NewPhysReg,
                                 VRM.getReMaterializedMI(NewOp.VirtReg));
              ++NumReMats;
            } else {
-             MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
+             TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
@@ -710,17 +749,193 @@ namespace {
   };
 }
 
+/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
+/// instruction. e.g.
+///     xorl  %edi, %eax
+///     movl  %eax, -32(%ebp)
+///     movl  -36(%ebp), %eax
+///     orl   %eax, -32(%ebp)
+/// ==>
+///     xorl  %edi, %eax
+///     orl   -36(%ebp), %eax
+///     mov   %eax, -32(%ebp)
+/// This enables unfolding optimization for a subsequent instruction which will
+/// also eliminate the newly introduced store instruction.
+bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator &MII,
+                                     std::vector<MachineInstr*> &MaybeDeadStores,
+                                     AvailableSpills &Spills,
+                                     BitVector &RegKills,
+                                     std::vector<MachineOperand*> &KillOps,
+                                     VirtRegMap &VRM) {
+  MachineFunction &MF = *MBB.getParent();
+  MachineInstr &MI = *MII;
+  unsigned UnfoldedOpc = 0;
+  unsigned UnfoldPR = 0;
+  unsigned UnfoldVR = 0;
+  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
+  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
+  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
+    // Only transform a MI that folds a single register.
+    if (UnfoldedOpc)
+      return false;
+    UnfoldVR = I->second.first;
+    VirtRegMap::ModRef MR = I->second.second;
+    if (VRM.isAssignedReg(UnfoldVR))
+      continue;
+    // If this reference is not a use, any previous store is now dead.
+    // Otherwise, the store to this stack slot is not dead anymore.
+    FoldedSS = VRM.getStackSlot(UnfoldVR);
+    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
+    if (DeadStore && (MR & VirtRegMap::isModRef)) {
+      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
+      if (!PhysReg ||
+          DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
+        continue;
+      UnfoldPR = PhysReg;
+      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
+                                                    false, true);
+    }
+  }
+
+  if (!UnfoldedOpc)
+    return false;
+
+  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI.getOperand(i);
+    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
+      continue;
+    unsigned VirtReg = MO.getReg();
+    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
+      continue;
+    if (VRM.isAssignedReg(VirtReg)) {
+      unsigned PhysReg = VRM.getPhys(VirtReg);
+      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
+        return false;
+    } else if (VRM.isReMaterialized(VirtReg))
+      continue;
+    int SS = VRM.getStackSlot(VirtReg);
+    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+    if (PhysReg) {
+      if (TRI->regsOverlap(PhysReg, UnfoldPR))
+        return false;
+      continue;
+    }
+    PhysReg = VRM.getPhys(VirtReg);
+    if (!TRI->regsOverlap(PhysReg, UnfoldPR))
+      continue;
+
+    // Ok, we'll need to reload the value into a register which makes
+    // it impossible to perform the store unfolding optimization later.
+    // Let's see if it is possible to fold the load if the store is
+    // unfolded. This allows us to perform the store unfolding
+    // optimization.
+    SmallVector<MachineInstr*, 4> NewMIs;
+    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
+      assert(NewMIs.size() == 1);
+      MachineInstr *NewMI = NewMIs.back();
+      NewMIs.clear();
+      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
+      assert(Idx != -1);
+      SmallVector<unsigned, 2> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
+      if (FoldedMI) {
+        if (!VRM.hasPhys(UnfoldVR))
+          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
+        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+        MII = MBB.insert(MII, FoldedMI);
+        VRM.RemoveMachineInstrFromMaps(&MI);
+        MBB.erase(&MI);
+        return true;
+      }
+      delete NewMI;
+    }
+  }
+  return false;
+}
+
+/// findSuperReg - Find the SubReg's super-register of given register class
+/// where its SubIdx sub-register is SubReg.
+static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
+                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
+  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+       I != E; ++I) {
+    unsigned Reg = *I;
+    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
+      return Reg;
+  }
+  return 0;
+}
+
+/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
+/// the last store to the same slot is now dead. If so, remove the last store.
+void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
+                                       MachineBasicBlock::iterator &MII,
+                                       int Idx, unsigned PhysReg, int StackSlot,
+                                       const TargetRegisterClass *RC,
+                                       bool isAvailable, MachineInstr *&LastStore,
+                                       AvailableSpills &Spills,
+                                       SmallSet<MachineInstr*, 4> &ReMatDefs,
+                                       BitVector &RegKills,
+                                       std::vector<MachineOperand*> &KillOps,
+                                       VirtRegMap &VRM) {
+  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
+  DOUT << "Store:\t" << *next(MII);
+
+  // If there is a dead store to this stack slot, nuke it now.
+  if (LastStore) {
+    DOUT << "Removed dead store:\t" << *LastStore;
+    ++NumDSE;
+    SmallVector<unsigned, 2> KillRegs;
+    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
+    MachineBasicBlock::iterator PrevMII = LastStore;
+    bool CheckDef = PrevMII != MBB.begin();
+    if (CheckDef)
+      --PrevMII;
+    MBB.erase(LastStore);
+    VRM.RemoveMachineInstrFromMaps(LastStore);
+    if (CheckDef) {
+      // Look at defs of killed registers on the store. Mark the defs
+      // as dead since the store has been deleted and they aren't
+      // being reused.
+      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
+        bool HasOtherDef = false;
+        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
+          MachineInstr *DeadDef = PrevMII;
+          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
+            // FIXME: This assumes a remat def does not have side
+            // effects.
+            MBB.erase(DeadDef);
+            VRM.RemoveMachineInstrFromMaps(DeadDef);
+            ++NumDRM;
+          }
+        }
+      }
+    }
+  }
+
+  LastStore = next(MII);
+
+  // If the stack slot value was previously available in some other
+  // register, change it now. Otherwise, make the register available,
+  // in PhysReg.
+  Spills.ModifyStackSlotOrReMat(StackSlot);
+  Spills.ClobberPhysReg(PhysReg);
+  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
+  ++NumStores;
+}
 
 /// rewriteMBB - Keep track of which spills are available even after the
 /// register allocator is done with them. If possible, avoid reloading vregs.
 void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
   DOUT << MBB.getBasicBlock()->getName() << ":\n";
   MachineFunction &MF = *MBB.getParent();
-  
+
   // Spills - Keep track of which spilled values are available in physregs so
   // that we can choose to reuse the physregs instead of emitting reloads.
-  AvailableSpills Spills(MRI, TII);
+  AvailableSpills Spills(TRI, TII);
 
   // MaybeDeadStores - When we need to write a value back into a stack slot,
   // keep track of the inserted store. If the stack slot value is never read
@@ -735,60 +950,99 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
   SmallSet<MachineInstr*, 4> ReMatDefs;
 
   // Keep track of kill information.
-  BitVector RegKills(MRI->getNumRegs());
+  BitVector RegKills(TRI->getNumRegs());
   std::vector<MachineOperand*> KillOps;
-  KillOps.resize(MRI->getNumRegs(), NULL);
+  KillOps.resize(TRI->getNumRegs(), NULL);
 
   for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
        MII != E; ) {
-    MachineInstr &MI = *MII;
     MachineBasicBlock::iterator NextMII = MII; ++NextMII;
-    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
+    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
     bool Erased = false;
     bool BackTracked = false;
+    if (PrepForUnfoldOpti(MBB, MII,
+                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
+      NextMII = next(MII);
 
-    /// ReusedOperands - Keep track of operand reuse in case we need to undo
-    /// reuse.
-    ReuseInfo ReusedOperands(MI, MRI);
-
-    // Loop over all of the implicit defs, clearing them from our available
-    // sets.
-    const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
-    if (TID->ImplicitDefs) {
-      const unsigned *ImpDef = TID->ImplicitDefs;
-      for ( ; *ImpDef; ++ImpDef) {
-        MF.setPhysRegUsed(*ImpDef);
-        ReusedOperands.markClobbered(*ImpDef);
-        Spills.ClobberPhysReg(*ImpDef);
+    MachineInstr &MI = *MII;
+    const TargetInstrDesc &TID = MI.getDesc();
+
+    // Insert restores here if asked to.
+    if (VRM.isRestorePt(&MI)) {
+      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
+      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
+        unsigned VirtReg = RestoreRegs[i];
+        if (!VRM.getPreSplitReg(VirtReg))
+          continue; // Split interval spilled again.
+        unsigned Phys = VRM.getPhys(VirtReg);
+        RegInfo->setPhysRegUsed(Phys);
+        if (VRM.isReMaterialized(VirtReg)) {
+          TRI->reMaterialize(MBB, &MI, Phys,
+                             VRM.getReMaterializedMI(VirtReg));
+          ++NumReMats;
+        } else {
+          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+          TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
+                                    RC);
+          ++NumLoads;
+        }
+        // This invalidates Phys.
+        Spills.ClobberPhysReg(Phys);
+        UpdateKills(*prior(MII), RegKills, KillOps);
+        DOUT << '\t' << *prior(MII);
       }
     }
 
+    // Insert spills here if asked to.
+    if (VRM.isSpillPt(&MI)) {
+      std::vector<std::pair<unsigned,bool> > &SpillRegs =
+        VRM.getSpillPtSpills(&MI);
+      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
+        unsigned VirtReg = SpillRegs[i].first;
+        bool isKill = SpillRegs[i].second;
+        if (!VRM.getPreSplitReg(VirtReg))
+          continue; // Split interval spilled again.
+        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+        unsigned Phys = VRM.getPhys(VirtReg);
+        int StackSlot = VRM.getStackSlot(VirtReg);
+        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
+        MachineInstr *StoreMI = next(MII);
+        DOUT << "Store:\t" << StoreMI;
+        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+      }
+      NextMII = next(MII);
+    }
+
+    /// ReusedOperands - Keep track of operand reuse in case we need to undo
+    /// reuse.
+    ReuseInfo ReusedOperands(MI, TRI);
 
     // Process all of the spilled uses and all non spilled reg references.
     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI.getOperand(i);
       if (!MO.isRegister() || MO.getReg() == 0)
         continue;   // Ignore non-register operands.
-      if (MRegisterInfo::isPhysicalRegister(MO.getReg())) {
+      unsigned VirtReg = MO.getReg();
+      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
         // Ignore physregs for spilling, but remember that it is used by this
         // function.
-        MF.setPhysRegUsed(MO.getReg());
-        ReusedOperands.markClobbered(MO.getReg());
+        RegInfo->setPhysRegUsed(VirtReg);
         continue;
       }
-      assert(MRegisterInfo::isVirtualRegister(MO.getReg()) &&
+      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
              "Not a virtual or a physical register?");
-
-      unsigned VirtReg = MO.getReg();
+
+      unsigned SubIdx = MO.getSubReg();
       if (VRM.isAssignedReg(VirtReg)) {
         // This virtual register was assigned a physreg!
         unsigned Phys = VRM.getPhys(VirtReg);
-        MF.setPhysRegUsed(Phys);
+        RegInfo->setPhysRegUsed(Phys);
         if (MO.isDef())
           ReusedOperands.markClobbered(Phys);
-        MI.getOperand(i).setReg(Phys);
+        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
+        MI.getOperand(i).setReg(RReg);
         continue;
       }
@@ -803,14 +1057,24 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 
       // Check to see if this stack slot is available.
       unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
-      if (!PhysReg && DoReMat) {
-        // This use is rematerializable. But perhaps the value is available in
-        // stack if the definition is not deleted. If so, check if we can
-        // reuse the value.
-        ReuseSlot = VRM.getStackSlot(VirtReg);
-        if (ReuseSlot != VirtRegMap::NO_STACK_SLOT)
-          PhysReg = Spills.getSpillSlotOrReMatPhysReg(ReuseSlot);
+
+      // If this is a sub-register use, make sure the reuse register is in the
+      // right register class. For example, for x86 not all of the 32-bit
+      // registers have accessible sub-registers.
+      // Similarly so for EXTRACT_SUBREG. Consider this:
+      // EDI = op
+      // MOV32_mr fi#1, EDI
+      // ...
+      //       = EXTRACT_SUBREG fi#1
+      // fi#1 is available in EDI, but it cannot be reused because it's not in
+      // the right register file.
+      if (PhysReg &&
+          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
+        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+        if (!RC->contains(PhysReg))
+          PhysReg = 0;
       }
+
       if (PhysReg) {
        // This spilled operand might be part of a two-address operand. If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well. However, in some cases, we
        // aren't allowed to modify the reused register. If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
-       int ti = TID->getOperandConstraint(i, TOI::TIED_TO);
+       int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
-           MI.getOperand(ti).isReg() &&
+           MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand. We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }
 
        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
-              << MRI->getName(PhysReg) << " for vreg"
+              << TRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
-              << MRI->getName(VRM.getPhys(VirtReg)) << "\n";
-         MI.getOperand(i).setReg(PhysReg);
+              << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
+         unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+         MI.getOperand(i).setReg(RReg);
 
          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
@@ -870,14 +1135,14 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
+             VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
-             VRM.RemoveFromFoldedVirtMap(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
-       }
+       }  // CanReuse
 
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded. This reload is already
@@ -908,19 +1173,20 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
-         DOUT << " from physreg " << MRI->getName(PhysReg) << " for vreg"
+         DOUT << " from physreg " << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into same physreg.\n";
-         MI.getOperand(i).setReg(PhysReg);
-         ReusedOperands.markClobbered(PhysReg);
+         unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+         MI.getOperand(i).setReg(RReg);
+         ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }
 
-       const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(VirtReg);
-       MF.setPhysRegUsed(DesignatedReg);
+       const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+       RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
-       MRI->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC);
+       TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
 
        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);
@@ -929,16 +1195,17 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        Spills.ClobberPhysReg(DesignatedReg);
        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
-       MI.getOperand(i).setReg(DesignatedReg);
+       unsigned RReg =
+         SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
+       MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
-      }
+      }  // if (PhysReg)
 
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");
-     const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(VirtReg);
 
      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
@@ -948,13 +1215,14 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, Spills,
                                 MaybeDeadStores, RegKills, KillOps, VRM);
 
-     MF.setPhysRegUsed(PhysReg);
+     RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
-       MRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
+       TRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
        ++NumReMats;
      } else {
-       MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
+       const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+       TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        ++NumLoads;
      }
      // This invalidates PhysReg.
@@ -966,29 +1234,29 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
-     if (TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
+     if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
-     MI.getOperand(i).setReg(PhysReg);
+     unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+     MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
    }
 
    DOUT << '\t' << MI;
+
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register
-   SmallSet FoldedSS;
+   SmallSet FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
-     DOUT << "Folded vreg: " << I->second.first << " MR: "
-          << I->second.second;
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
-     if (VRM.isAssignedReg(VirtReg)) {
-       DOUT << ": No stack slot!\n";
-       continue;
-     }
+     DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
+
      int SS = VRM.getStackSlot(VirtReg);
+     if (SS == VirtRegMap::NO_STACK_SLOT)
+       continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
@@ -996,29 +1264,44 @@
      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
-       if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
-         if (FrameIdx == SS) {
-           // If this spill slot is available, turn it into a copy (or nothing)
-           // instead of leaving it as a load!
-           if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
-             DOUT << "Promoted Load To Copy: " << MI;
-             if (DestReg != InReg) {
-               MRI->copyRegToReg(MBB, &MI, DestReg, InReg,
-                                 MF.getSSARegMap()->getRegClass(VirtReg));
-               // Revisit the copy so we make sure to notice the effects of the
-               // operation on the destreg (either needing to RA it if it's
-               // virtual or needing to clobber any values if it's physical).
-               NextMII = &MI;
-               --NextMII;  // backtrack to the copy.
-               BackTracked = true;
-             } else
-               DOUT << "Removing now-noop copy: " << MI;
-
-             VRM.RemoveFromFoldedVirtMap(&MI);
-             MBB.erase(&MI);
-             Erased = true;
-             goto ProcessNextInst;
+       unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
+       if (DestReg && FrameIdx == SS) {
+         // If this spill slot is available, turn it into a copy (or nothing)
+         // instead of leaving it as a load!
+         if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
+           DOUT << "Promoted Load To Copy: " << MI;
+           if (DestReg != InReg) {
+             const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+             TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
+             // Revisit the copy so we make sure to notice the effects of the
+             // operation on the destreg (either needing to RA it if it's
+             // virtual or needing to clobber any values if it's physical).
+             NextMII = &MI;
+             --NextMII;  // backtrack to the copy.
+             BackTracked = true;
+           } else {
+             DOUT << "Removing now-noop copy: " << MI;
+             // Unset last kill since it's being reused.
+             InvalidateKill(InReg, RegKills, KillOps);
            }
+
+           VRM.RemoveMachineInstrFromMaps(&MI);
+           MBB.erase(&MI);
+           Erased = true;
+           goto ProcessNextInst;
+         }
+       } else {
+         unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+         SmallVector<MachineInstr*, 4> NewMIs;
+         if (PhysReg &&
+             TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
+           MBB.insert(MII, NewMIs[0]);
+           VRM.RemoveMachineInstrFromMaps(&MI);
+           MBB.erase(&MI);
+           Erased = true;
+           --NextMII;  // backtrack to the unfolded instruction.
+           BackTracked = true;
+           goto ProcessNextInst;
         }
       }
      }
@@ -1027,16 +1310,48 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
-       if (!(MR & VirtRegMap::isRef)) { // Previous store is dead.
+       bool isDead = !(MR & VirtRegMap::isRef);
+       MachineInstr *NewStore = NULL;
+       if (MR & VirtRegMap::isModRef) {
+         unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+         SmallVector<MachineInstr*, 4> NewMIs;
+         // We can reuse this physreg as long as we are allowed to clobber
+         // the value and there isn't an earlier def that has already clobbered
+         // the physreg.
+         if (PhysReg &&
+             !TII->isStoreToStackSlot(&MI, SS) &&  // Not profitable!
+             DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
+             TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
+           MBB.insert(MII, NewMIs[0]);
+           NewStore = NewMIs[1];
+           MBB.insert(MII, NewStore);
+           VRM.RemoveMachineInstrFromMaps(&MI);
+           MBB.erase(&MI);
+           Erased = true;
+           --NextMII;
+           --NextMII;  // backtrack to the unfolded instruction.
+           BackTracked = true;
+           isDead = true;
+         }
+       }
+
+       if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
-         assert(VirtRegMap::isMod && "Can't be modref!");
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
+         VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
-         VRM.RemoveFromFoldedVirtMap(DeadStore);
-         ++NumDSE;
+         if (!NewStore)
+           ++NumDSE;
        }
+
+       MaybeDeadStores[SS] = NULL;
+       if (NewStore) {
+         // Treat this store as a spill merged into a copy. That makes the
+         // stack slot value available.
+         VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
+         goto ProcessNextInst;
+       }
      }
 
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
@@ -1052,7 +1367,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
      int StackSlot;
      if (!(MR & VirtRegMap::isRef)) {
        if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
-         assert(MRegisterInfo::isPhysicalRegister(SrcReg) &&
+         assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                 "Src hasn't been allocated yet?");
          // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
          // this as a potentially dead store in case there is a subsequent
          // store to the same stack slot.
@@ -1071,142 +1386,111 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
-     if (MO.isRegister() && MO.getReg() && MO.isDef()) {
-       unsigned VirtReg = MO.getReg();
+     if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
+       continue;
 
-       if (!MRegisterInfo::isVirtualRegister(VirtReg)) {
-         // Check to see if this is a noop copy. If so, eliminate the
-         // instruction before considering the dest reg to be changed.
+     unsigned VirtReg = MO.getReg();
+     if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
+       // Check to see if this is a noop copy. If so, eliminate the
+       // instruction before considering the dest reg to be changed.
+       unsigned Src, Dst;
+       if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
+         ++NumDCE;
+         DOUT << "Removing now-noop copy: " << MI;
+         MBB.erase(&MI);
+         Erased = true;
+         VRM.RemoveMachineInstrFromMaps(&MI);
+         Spills.disallowClobberPhysReg(VirtReg);
+         goto ProcessNextInst;
+       }
+
+       // If it's not a no-op copy, it clobbers the value in the destreg.
+       Spills.ClobberPhysReg(VirtReg);
+       ReusedOperands.markClobbered(VirtReg);
+
+       // Check to see if this instruction is a load from a stack slot into
+       // a register. If so, this provides the stack slot value in the reg.
+       int FrameIdx;
+       if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
+         assert(DestReg == VirtReg && "Unknown load situation!");
+
+         // If it is a folded reference, then it's not safe to clobber.
+         bool Folded = FoldedSS.count(FrameIdx);
+         // Otherwise, if it wasn't available, remember that it is now!
+         Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
+         goto ProcessNextInst;
+       }
+
+       continue;
+     }
+
+     unsigned SubIdx = MO.getSubReg();
+     bool DoReMat = VRM.isReMaterialized(VirtReg);
+     if (DoReMat)
+       ReMatDefs.insert(&MI);
+
+     // The only vregs left are stack slot definitions.
+     int StackSlot = VRM.getStackSlot(VirtReg);
+     const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+
+     // If this def is part of a two-address operand, make sure to execute
+     // the store from the correct physical register.
+     unsigned PhysReg;
+     int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
+     if (TiedOp != -1) {
+       PhysReg = MI.getOperand(TiedOp).getReg();
+       if (SubIdx) {
+         unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
+         assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
+                "Can't find corresponding super-register!");
+         PhysReg = SuperReg;
+       }
+     } else {
+       PhysReg = VRM.getPhys(VirtReg);
+       if (ReusedOperands.isClobbered(PhysReg)) {
+         // Another def has taken the assigned physreg. It must have been a
+         // use&def which got it due to reuse. Undo the reuse!
+         PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
+                              Spills, MaybeDeadStores, RegKills, KillOps, VRM);
+       }
+     }
+
+     RegInfo->setPhysRegUsed(PhysReg);
+     unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+     ReusedOperands.markClobbered(RReg);
+     MI.getOperand(i).setReg(RReg);
+
+     if (!MO.isDead()) {
+       MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
+       SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
+                           LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
+       NextMII = next(MII);
+
+       // Check to see if this is a noop copy. If so, eliminate the
+       // instruction before considering the dest reg to be changed.
+       {
          unsigned Src, Dst;
          if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            MBB.erase(&MI);
            Erased = true;
-           VRM.RemoveFromFoldedVirtMap(&MI);
-           Spills.disallowClobberPhysReg(VirtReg);
-           goto ProcessNextInst;
-         }
-
-         // If it's not a no-op copy, it clobbers the value in the destreg.
-         Spills.ClobberPhysReg(VirtReg);
-         ReusedOperands.markClobbered(VirtReg);
-
-         // Check to see if this instruction is a load from a stack slot into
-         // a register. If so, this provides the stack slot value in the reg.
-         int FrameIdx;
-         if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
-           assert(DestReg == VirtReg && "Unknown load situation!");
-
-           // If it is a folded reference, then it's not safe to clobber.
-           bool Folded = FoldedSS.count(FrameIdx);
-           // Otherwise, if it wasn't available, remember that it is now!
-           Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
+           VRM.RemoveMachineInstrFromMaps(&MI);
+           UpdateKills(*LastStore, RegKills, KillOps);
            goto ProcessNextInst;
          }
-
-         continue;
-       }
-
-       bool DoReMat = VRM.isReMaterialized(VirtReg);
-       if (DoReMat)
-         ReMatDefs.insert(&MI);
-
-       // The only vregs left are stack slot definitions.
-       int StackSlot = VRM.getStackSlot(VirtReg);
-       const TargetRegisterClass *RC = MF.getSSARegMap()->getRegClass(VirtReg);
-
-       // If this def is part of a two-address operand, make sure to execute
-       // the store from the correct physical register.
-       unsigned PhysReg;
-       int TiedOp = MI.getInstrDescriptor()->findTiedToSrcOperand(i);
-       if (TiedOp != -1)
-         PhysReg = MI.getOperand(TiedOp).getReg();
-       else {
-         PhysReg = VRM.getPhys(VirtReg);
-         if (ReusedOperands.isClobbered(PhysReg)) {
-           // Another def has taken the assigned physreg. It must have been a
-           // use&def which got it due to reuse. Undo the reuse!
-           PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
-                                Spills, MaybeDeadStores, RegKills, KillOps, VRM);
-         }
-       }
-
-       MF.setPhysRegUsed(PhysReg);
-       ReusedOperands.markClobbered(PhysReg);
-       MI.getOperand(i).setReg(PhysReg);
-       if (!MO.isDead()) {
-         MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
-         DOUT << "Store:\t" << *next(MII);
-
-         // If there is a dead store to this stack slot, nuke it now.
-         MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
-         if (LastStore) {
-           DOUT << "Removed dead store:\t" << *LastStore;
-           ++NumDSE;
-           SmallVector<unsigned, 2> KillRegs;
-           InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
-           MachineBasicBlock::iterator PrevMII = LastStore;
-           bool CheckDef = PrevMII != MBB.begin();
-           if (CheckDef)
-             --PrevMII;
-           MBB.erase(LastStore);
-           VRM.RemoveFromFoldedVirtMap(LastStore);
-           if (CheckDef) {
-             // Look at defs of killed registers on the store. Mark the defs
-             // as dead since the store has been deleted and they aren't
-             // being reused.
-             for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
-               bool HasOtherDef = false;
-               if (InvalidateRegDef(PrevMII, MI, KillRegs[j], HasOtherDef)) {
-                 MachineInstr *DeadDef = PrevMII;
-                 if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
-                   // FIXME: This assumes a remat def does not have side
-                   // effects.
-                   MBB.erase(DeadDef);
-                   VRM.RemoveFromFoldedVirtMap(DeadDef);
-                   ++NumDRM;
-                 }
-               }
-             }
-           }
-         }
-         LastStore = next(MII);
-
-         // If the stack slot value was previously available in some other
-         // register, change it now. Otherwise, make the register available,
-         // in PhysReg.
-         Spills.ModifyStackSlotOrReMat(StackSlot);
-         Spills.ClobberPhysReg(PhysReg);
-         Spills.addAvailable(StackSlot, LastStore, PhysReg);
-         ++NumStores;
-
-         // Check to see if this is a noop copy. If so, eliminate the
-         // instruction before considering the dest reg to be changed.
-         {
-           unsigned Src, Dst;
-           if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
-             ++NumDCE;
-             DOUT << "Removing now-noop copy: " << MI;
-             MBB.erase(&MI);
-             Erased = true;
-             VRM.RemoveFromFoldedVirtMap(&MI);
-             UpdateKills(*LastStore, RegKills, KillOps);
-             goto ProcessNextInst;
-           }
-         }
-       }
-     }
+       }
     }
   ProcessNextInst:
-    if (!Erased && !BackTracked)
+    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
+    }
    MII = NextMII;
   }
 }
-
 llvm::Spiller* llvm::createSpiller() {
   switch (SpillerOpt) {
   default: assert(0 && "Unreachable!");