X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FVirtRegMap.cpp;h=e2dc636e69119c00377748613022631b4aada493;hb=58b1ac76d470eb5faa7e98feae97c4906d4d146e;hp=b094412ae859e1997f5850cfcddd6afcce849fce;hpb=c17ba8a28d2f83dd320745c0c9994464845ac990;p=oota-llvm.git

diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index b094412ae85..e2dc636e691 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -21,43 +21,49 @@
 #include "llvm/Function.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallSet.h"
 #include <algorithm>
 using namespace llvm;
 
-STATISTIC(NumSpills, "Number of register spills");
-STATISTIC(NumPSpills,"Number of physical register spills");
-STATISTIC(NumReMats, "Number of re-materialization");
-STATISTIC(NumDRM   , "Number of re-materializable defs elided");
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumReused, "Number of values reused");
-STATISTIC(NumDSE   , "Number of dead stores elided");
-STATISTIC(NumDCE   , "Number of copies elided");
-STATISTIC(NumDSS   , "Number of dead spill slots removed");
+STATISTIC(NumSpills  , "Number of register spills");
+STATISTIC(NumPSpills , "Number of physical register spills");
+STATISTIC(NumReMats  , "Number of re-materialization");
+STATISTIC(NumDRM     , "Number of re-materializable defs elided");
+STATISTIC(NumStores  , "Number of stores added");
+STATISTIC(NumLoads   , "Number of loads added");
+STATISTIC(NumReused  , "Number of values reused");
+STATISTIC(NumDSE     , "Number of dead stores elided");
+STATISTIC(NumDCE     , "Number of copies elided");
+STATISTIC(NumDSS     , "Number of dead spill slots removed");
+STATISTIC(NumCommutes, "Number of instructions commuted");
+STATISTIC(NumOmitted , "Number of reloads omited");
+STATISTIC(NumCopified, "Number of available reloads turned into copies");
 
 namespace {
   enum SpillerName { simple, local };
-
-  static cl::opt<SpillerName>
-  SpillerOpt("spiller",
-             cl::desc("Spiller to use: (default: local)"),
-             cl::Prefix,
-             cl::values(clEnumVal(simple, " simple spiller"),
-                        clEnumVal(local,  " local spiller"),
-                        clEnumValEnd),
-             cl::init(local));
 }
 
+static cl::opt<SpillerName>
+SpillerOpt("spiller",
+           cl::desc("Spiller to use: (default: local)"),
+           cl::Prefix,
+           cl::values(clEnumVal(simple, "simple spiller"),
+                      clEnumVal(local,  "local spiller"),
+                      clEnumValEnd),
+           cl::init(local));
+
 //===----------------------------------------------------------------------===//
 //  VirtRegMap implementation
 //===----------------------------------------------------------------------===//
@@ -69,6 +75,8 @@ VirtRegMap::VirtRegMap(MachineFunction &mf)
     Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
     LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
   SpillSlotToUsesMap.resize(8);
+  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
+                       TargetRegisterInfo::FirstVirtualRegister);
   grow();
 }
 
@@ -80,6 +88,7 @@ void VirtRegMap::grow() {
   Virt2SplitMap.grow(LastVirtReg);
   Virt2SplitKillMap.grow(LastVirtReg);
   ReMatMap.grow(LastVirtReg);
+  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
 }
 
 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
@@ -137,14 +146,20 @@ int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
     LowSpillSlot = SS;
   if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
     HighSpillSlot = SS;
-  I->second = SS;
+  EmergencySpillSlots[RC] = SS;
   return SS;
 }
 
 void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
   if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
-    assert(FI >= 0 && "Spill slot index should not be negative!");
-    SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
+    // If FI < LowSpillSlot, this stack reference was produced by
+    // instruction selection and is not a spill
+    if (FI >= LowSpillSlot) {
+      assert(FI >= 0 && "Spill slot index should not be negative!");
+      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+             && "Invalid spill slot");
+      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
+    }
   }
 }
 
@@ -170,11 +185,17 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
 void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI->getOperand(i);
-    if (!MO.isFrameIndex())
+    if (!MO.isFI())
       continue;
     int FI = MO.getIndex();
     if (MF.getFrameInfo()->isFixedObjectIndex(FI))
      continue;
+    // This stack reference was produced by instruction selection and
+    // is not a spill
+    if (FI < LowSpillSlot)
+      continue;
+    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+           && "Invalid spill slot");
     SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
   }
   MI2VirtMap.erase(MI);
@@ -223,6 +244,7 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
   DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
   const TargetMachine &TM = MF.getTarget();
   const TargetInstrInfo &TII = *TM.getInstrInfo();
+  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
 
   // LoadedRegs - Keep track of which vregs are loaded, so that we only load
@@ -240,10 +262,12 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
       MachineInstr &MI = *MII;
       for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
         MachineOperand &MO = MI.getOperand(i);
-        if (MO.isRegister() && MO.getReg()) {
+        if (MO.isReg() && MO.getReg()) {
           if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
             unsigned VirtReg = MO.getReg();
+            unsigned SubIdx = MO.getSubReg();
             unsigned PhysReg = VRM.getPhys(VirtReg);
+            unsigned RReg = SubIdx ?
+              TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
             if (!VRM.isAssignedReg(VirtReg)) {
               int StackSlot = VRM.getStackSlot(VirtReg);
               const TargetRegisterClass* RC =
@@ -268,8 +292,8 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
                 ++NumStores;
               }
             }
-            MF.getRegInfo().setPhysRegUsed(PhysReg);
-            MI.getOperand(i).setReg(PhysReg);
+            MF.getRegInfo().setPhysRegUsed(RReg);
+            MI.getOperand(i).setReg(RReg);
           } else {
             MF.getRegInfo().setPhysRegUsed(MO.getReg());
           }
@@ -287,68 +311,6 @@ bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
 //  Local Spiller Implementation
 //===----------------------------------------------------------------------===//
 
-namespace {
-  class AvailableSpills;
-
-  /// LocalSpiller - This spiller does a simple pass over the machine basic
-  /// block to attempt to keep spills in registers as much as possible for
-  /// blocks that have low register pressure (the vreg may be spilled due to
-  /// register pressure in other blocks).
-  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
-    MachineRegisterInfo *RegInfo;
-    const TargetRegisterInfo *TRI;
-    const TargetInstrInfo *TII;
-  public:
-    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
-      RegInfo = &MF.getRegInfo();
-      TRI = MF.getTarget().getRegisterInfo();
-      TII = MF.getTarget().getInstrInfo();
-      DOUT << "\n**** Local spiller rewriting function '"
-           << MF.getFunction()->getName() << "':\n";
-      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
-              " ****\n";
-      DEBUG(MF.dump());
-
-      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
-           MBB != E; ++MBB)
-        RewriteMBB(*MBB, VRM);
-
-      // Mark unused spill slots.
-      MachineFrameInfo *MFI = MF.getFrameInfo();
-      int SS = VRM.getLowSpillSlot();
-      if (SS != VirtRegMap::NO_STACK_SLOT)
-        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
-          if (!VRM.isSpillSlotUsed(SS)) {
-            MFI->RemoveStackObject(SS);
-            ++NumDSS;
-          }
-
-      DOUT << "**** Post Machine Instrs ****\n";
-      DEBUG(MF.dump());
-
-      return true;
-    }
-  private:
-    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator &MII,
-                           std::vector<MachineInstr*> &MaybeDeadStores,
-                           AvailableSpills &Spills, BitVector &RegKills,
-                           std::vector<MachineOperand*> &KillOps,
-                           VirtRegMap &VRM);
-    void SpillRegToStackSlot(MachineBasicBlock &MBB,
-                             MachineBasicBlock::iterator &MII,
-                             int Idx, unsigned PhysReg, int StackSlot,
-                             const TargetRegisterClass *RC,
-                             bool isAvailable, MachineInstr *&LastStore,
-                             AvailableSpills &Spills,
-                             SmallSet<MachineInstr*, 4> &ReMatDefs,
-                             BitVector &RegKills,
-                             std::vector<MachineOperand*> &KillOps,
-                             VirtRegMap &VRM);
-    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
-  };
-}
-
 /// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
 /// top down, keep track of which spills slots or remat are available in each
 /// register.
@@ -383,6 +345,12 @@ public:
   AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
     : TRI(tri), TII(tii) {
   }
+
+  /// clear - Reset the state.
+  void clear() {
+    SpillSlotsOrReMatsAvailable.clear();
+    PhysRegsAvailable.clear();
+  }
 
   const TargetRegisterInfo *getRegInfo() const { return TRI; }
 
@@ -401,8 +369,7 @@ public:
   /// addAvailable - Mark that the specified stack slot / remat is available in
   /// the specified physreg.  If CanClobber is true, the physreg can be modified
   /// at any time without changing the semantics of the program.
-  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
-                    bool CanClobber = true) {
+  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
     // If this stack slot is thought to be available in some other physreg,
     // remove its record.
     ModifyStackSlotOrReMat(SlotOrReMat);
@@ -440,6 +407,8 @@ public:
   /// slot changes.  This removes information about which register the previous
   /// value for this slot lives in (as the previous value is dead now).
   void ModifyStackSlotOrReMat(int SlotOrReMat);
+
+  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB);
 };
 }
 
@@ -519,7 +488,148 @@ void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
   PhysRegsAvailable.erase(I);
 }
 
+/// AddAvailableRegsToLiveIn - Availability information is being kept coming
+/// into the specified MBB. Add available physical registers as live-in's
+/// so register scavenger and post-allocation scheduler are happy.
+void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB) {
+  for (std::multimap<unsigned, int>::iterator
+         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
+       I != E; ++I) {
+    unsigned Reg = (*I).first;
+    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
+    // FIXME: A temporary workaround. We can't reuse available value if it's
+    // not safe to move the def of the virtual register's class. e.g.
+    // X86::RFP* register classes. Do not add it as a live-in.
+    if (!TII->isSafeToMoveRegClassDefs(RC))
+      continue;
+    if (!MBB.isLiveIn(Reg))
+      MBB.addLiveIn(Reg);
+  }
+}
+
+/// findSinglePredSuccessor - Return via reference a vector of machine basic
+/// blocks each of which is a successor of the specified BB and has no other
+/// predecessor.
+static void findSinglePredSuccessor(MachineBasicBlock *MBB,
+                                   SmallVectorImpl<MachineBasicBlock *> &Succs) {
+  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+         SE = MBB->succ_end(); SI != SE; ++SI) {
+    MachineBasicBlock *SuccMBB = *SI;
+    if (SuccMBB->pred_size() == 1)
+      Succs.push_back(SuccMBB);
+  }
+}
+
+namespace {
+  /// LocalSpiller - This spiller does a simple pass over the machine basic
+  /// block to attempt to keep spills in registers as much as possible for
+  /// blocks that have low register pressure (the vreg may be spilled due to
+  /// register pressure in other blocks).
+  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
+    MachineRegisterInfo *RegInfo;
+    const TargetRegisterInfo *TRI;
+    const TargetInstrInfo *TII;
+    DenseMap<MachineInstr*, unsigned> DistanceMap;
+  public:
+    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
+      RegInfo = &MF.getRegInfo();
+      TRI = MF.getTarget().getRegisterInfo();
+      TII = MF.getTarget().getInstrInfo();
+      DOUT << "\n**** Local spiller rewriting function '"
+           << MF.getFunction()->getName() << "':\n";
+      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
+              " ****\n";
+      DEBUG(MF.dump());
+
+      // Spills - Keep track of which spilled values are available in physregs
+      // so that we can choose to reuse the physregs instead of emitting
+      // reloads. This is usually refreshed per basic block.
+      AvailableSpills Spills(TRI, TII);
+
+      // SingleEntrySuccs - Successor blocks which have a single predecessor.
+      SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
+      SmallPtrSet<MachineBasicBlock*, 16> EarlyVisited;
+
+      // Traverse the basic blocks depth first.
+      MachineBasicBlock *Entry = MF.begin();
+      SmallPtrSet<MachineBasicBlock*, 16> Visited;
+      for (df_ext_iterator<MachineBasicBlock*,
+             SmallPtrSet<MachineBasicBlock*, 16> >
+             DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
+           DFI != E; ++DFI) {
+        MachineBasicBlock *MBB = *DFI;
+        if (!EarlyVisited.count(MBB))
+          RewriteMBB(*MBB, VRM, Spills);
+
+        // If this MBB is the only predecessor of a successor. Keep the
+        // availability information and visit it next.
+        do {
+          // Keep visiting single predecessor successor as long as possible.
+          SinglePredSuccs.clear();
+          findSinglePredSuccessor(MBB, SinglePredSuccs);
+          if (SinglePredSuccs.empty())
+            MBB = 0;
+          else {
+            // FIXME: More than one successors, each of which has MBB has
+            // the only predecessor.
+            MBB = SinglePredSuccs[0];
+            if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
+              Spills.AddAvailableRegsToLiveIn(*MBB);
+              RewriteMBB(*MBB, VRM, Spills);
+            }
+          }
+        } while (MBB);
+
+        // Clear the availability info.
+        Spills.clear();
+      }
+
+      DOUT << "**** Post Machine Instrs ****\n";
+      DEBUG(MF.dump());
+
+      // Mark unused spill slots.
+      MachineFrameInfo *MFI = MF.getFrameInfo();
+      int SS = VRM.getLowSpillSlot();
+      if (SS != VirtRegMap::NO_STACK_SLOT)
+        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
+          if (!VRM.isSpillSlotUsed(SS)) {
+            MFI->RemoveStackObject(SS);
+            ++NumDSS;
+          }
+      return true;
+    }
+  private:
+    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+                          unsigned Reg, BitVector &RegKills,
+                          std::vector<MachineOperand*> &KillOps);
+    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator &MII,
+                           std::vector<MachineInstr*> &MaybeDeadStores,
+                           AvailableSpills &Spills, BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps,
+                           VirtRegMap &VRM);
+    bool CommuteToFoldReload(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MII,
+                             unsigned VirtReg, unsigned SrcReg, int SS,
+                             BitVector &RegKills,
+                             std::vector<MachineOperand*> &KillOps,
+                             const TargetRegisterInfo *TRI,
+                             VirtRegMap &VRM);
+    void SpillRegToStackSlot(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MII,
+                             int Idx, unsigned PhysReg, int StackSlot,
+                             const TargetRegisterClass *RC,
+                             bool isAvailable, MachineInstr *&LastStore,
+                             AvailableSpills &Spills,
+                             SmallSet<MachineInstr*, 4> &ReMatDefs,
+                             BitVector &RegKills,
+                             std::vector<MachineOperand*> &KillOps,
+                             VirtRegMap &VRM);
+    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
+                    AvailableSpills &Spills);
+  };
+}
 
 /// InvalidateKills - MI is going to be deleted. If any of its operands are
 /// marked kill, then invalidate the information.
@@ -528,11 +638,14 @@ static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                             SmallVector<unsigned, 2> *KillRegs = NULL) {
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
+    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
       continue;
     unsigned Reg = MO.getReg();
+    if (TargetRegisterInfo::isVirtualRegister(Reg))
+      continue;
     if (KillRegs)
       KillRegs->push_back(Reg);
+    assert(Reg < KillOps.size());
     if (KillOps[Reg] == &MO) {
       RegKills.reset(Reg);
       KillOps[Reg] = NULL;
@@ -564,7 +677,7 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
   MachineOperand *DefOp = NULL;
   for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = DefMI->getOperand(i);
-    if (MO.isRegister() && MO.isDef()) {
+    if (MO.isReg() && MO.isDef()) {
       if (MO.getReg() == Reg)
         DefOp = &MO;
       else if (!MO.isDead())
@@ -575,13 +688,13 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
     return false;
 
   bool FoundUse = false, Done = false;
-  MachineBasicBlock::iterator E = NewDef;
+  MachineBasicBlock::iterator E = &NewDef;
   ++I; ++E;
   for (; !Done && I != E; ++I) {
     MachineInstr *NMI = I;
     for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
       MachineOperand &MO = NMI->getOperand(j);
-      if (!MO.isRegister() || MO.getReg() != Reg)
+      if (!MO.isReg() || MO.getReg() != Reg)
         continue;
       if (MO.isUse())
         FoundUse = true;
@@ -600,17 +713,18 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
 /// marked kill, then it must be due to register reuse. Transfer the kill info
 /// over.
 static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
-                        std::vector<MachineOperand*> &KillOps) {
+                        std::vector<MachineOperand*> &KillOps,
+                        const TargetRegisterInfo* TRI) {
   const TargetInstrDesc &TID = MI.getDesc();
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isRegister() || !MO.isUse())
+    if (!MO.isReg() || !MO.isUse())
      continue;
     unsigned Reg = MO.getReg();
     if (Reg == 0)
       continue;
-    if (RegKills[Reg]) {
+    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
       // That can't be right. Register is killed but not re-defined and it's
       // being reused. Let's fix that.
       KillOps[Reg]->setIsKill(false);
@@ -629,11 +743,16 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI.getOperand(i);
-    if (!MO.isRegister() || !MO.isDef())
+    if (!MO.isReg() || !MO.isDef())
       continue;
     unsigned Reg = MO.getReg();
     RegKills.reset(Reg);
     KillOps[Reg] = NULL;
+    // It also defines (or partially define) aliases.
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) { + RegKills.reset(*AS); + KillOps[*AS] = NULL; + } } } @@ -642,13 +761,14 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills, static void ReMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MII, unsigned DestReg, unsigned Reg, + const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, VirtRegMap &VRM) { - TRI->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); + TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); MachineInstr *NewMI = prior(MII); for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { MachineOperand &MO = NewMI->getOperand(i); - if (!MO.isRegister() || MO.getReg() == 0) + if (!MO.isReg() || MO.getReg() == 0) continue; unsigned VirtReg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) @@ -784,7 +904,7 @@ namespace { MachineBasicBlock::iterator MII = MI; if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { - ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TRI, VRM); + ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM); } else { TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg, NewOp.StackSlotOrReMat, AliasRC); @@ -796,12 +916,14 @@ namespace { } Spills.ClobberPhysReg(NewPhysReg); Spills.ClobberPhysReg(NewOp.PhysRegReused); + + unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg(); + unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg; + MI->getOperand(NewOp.Operand).setReg(RReg); - MI->getOperand(NewOp.Operand).setReg(NewPhysReg); - - Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg); + Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg); --MII; - UpdateKills(*MII, RegKills, KillOps); + UpdateKills(*MII, RegKills, KillOps, TRI); DOUT << '\t' << *MII; DOUT << "Reuse undone!\n"; @@ -852,12 +974,12 @@ namespace { /// This enables unfolding optimization for a subsequent instruction which will /// also eliminate the newly introduced store instruction. bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, - MachineBasicBlock::iterator &MII, + MachineBasicBlock::iterator &MII, std::vector &MaybeDeadStores, - AvailableSpills &Spills, - BitVector &RegKills, - std::vector &KillOps, - VirtRegMap &VRM) { + AvailableSpills &Spills, + BitVector &RegKills, + std::vector &KillOps, + VirtRegMap &VRM) { MachineFunction &MF = *MBB.getParent(); MachineInstr &MI = *MII; unsigned UnfoldedOpc = 0; @@ -895,7 +1017,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); - if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse()) + if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse()) continue; unsigned VirtReg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg()) @@ -913,9 +1035,11 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, return false; continue; } - PhysReg = VRM.getPhys(VirtReg); - if (!TRI->regsOverlap(PhysReg, UnfoldPR)) - continue; + if (VRM.hasPhys(VirtReg)) { + PhysReg = VRM.getPhys(VirtReg); + if (!TRI->regsOverlap(PhysReg, UnfoldPR)) + continue; + } // Ok, we'll need to reload the value into a register which makes // it impossible to perform the store unfolding optimization later. 
@@ -929,7 +1053,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, NewMIs.clear(); int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false); assert(Idx != -1); - SmallVector Ops; + SmallVector Ops; Ops.push_back(Idx); MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS); if (FoldedMI) { @@ -938,16 +1062,105 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, VRM.assignVirt2Phys(UnfoldVR, UnfoldPR); VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); MII = MBB.insert(MII, FoldedMI); + InvalidateKills(MI, RegKills, KillOps); VRM.RemoveMachineInstrFromMaps(&MI); MBB.erase(&MI); + MF.DeleteMachineInstr(NewMI); return true; } - delete NewMI; + MF.DeleteMachineInstr(NewMI); } } return false; } +/// CommuteToFoldReload - +/// Look for +/// r1 = load fi#1 +/// r1 = op r1, r2 +/// store r1, fi#1 +/// +/// If op is commutable and r2 is killed, then we can xform these to +/// r2 = op r2, fi#1 +/// store r2, fi#1 +bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB, + MachineBasicBlock::iterator &MII, + unsigned VirtReg, unsigned SrcReg, int SS, + BitVector &RegKills, + std::vector &KillOps, + const TargetRegisterInfo *TRI, + VirtRegMap &VRM) { + if (MII == MBB.begin() || !MII->killsRegister(SrcReg)) + return false; + + MachineFunction &MF = *MBB.getParent(); + MachineInstr &MI = *MII; + MachineBasicBlock::iterator DefMII = prior(MII); + MachineInstr *DefMI = DefMII; + const TargetInstrDesc &TID = DefMI->getDesc(); + unsigned NewDstIdx; + if (DefMII != MBB.begin() && + TID.isCommutable() && + TII->CommuteChangesDestination(DefMI, NewDstIdx)) { + MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); + unsigned NewReg = NewDstMO.getReg(); + if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg)) + return false; + MachineInstr *ReloadMI = prior(DefMII); + int FrameIdx; + unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx); + if (DestReg != SrcReg || FrameIdx != SS) + return false; + int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false); + if (UseIdx == -1) + return false; + int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO); + if (DefIdx == -1) + return false; + assert(DefMI->getOperand(DefIdx).isReg() && + DefMI->getOperand(DefIdx).getReg() == SrcReg); + + // Now commute def instruction. + MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true); + if (!CommutedMI) + return false; + SmallVector Ops; + Ops.push_back(NewDstIdx); + MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS); + // Not needed since foldMemoryOperand returns new MI. + MF.DeleteMachineInstr(CommutedMI); + if (!FoldedMI) + return false; + + VRM.addSpillSlotUse(SS, FoldedMI); + VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); + // Insert new def MI and spill MI. + const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg); + TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC); + MII = prior(MII); + MachineInstr *StoreMI = MII; + VRM.addSpillSlotUse(SS, StoreMI); + VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); + MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack. + + // Delete all 3 old instructions. 
+ InvalidateKills(*ReloadMI, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(ReloadMI); + MBB.erase(ReloadMI); + InvalidateKills(*DefMI, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(DefMI); + MBB.erase(DefMI); + InvalidateKills(MI, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(&MI); + MBB.erase(&MI); + + ++NumCommutes; + return true; + } + + return false; +} + /// findSuperReg - Find the SubReg's super-register of given register class /// where its SubIdx sub-register is SubReg. static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg, @@ -1017,21 +1230,62 @@ void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB, // in PhysReg. Spills.ModifyStackSlotOrReMat(StackSlot); Spills.ClobberPhysReg(PhysReg); - Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable); + Spills.addAvailable(StackSlot, PhysReg, isAvailable); ++NumStores; } +/// TransferDeadness - A identity copy definition is dead and it's being +/// removed. Find the last def or use and mark it as dead / kill. +void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist, + unsigned Reg, BitVector &RegKills, + std::vector &KillOps) { + int LastUDDist = -1; + MachineInstr *LastUDMI = NULL; + for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg), + RE = RegInfo->reg_end(); RI != RE; ++RI) { + MachineInstr *UDMI = &*RI; + if (UDMI->getParent() != MBB) + continue; + DenseMap::iterator DI = DistanceMap.find(UDMI); + if (DI == DistanceMap.end() || DI->second > CurDist) + continue; + if ((int)DI->second < LastUDDist) + continue; + LastUDDist = DI->second; + LastUDMI = UDMI; + } + + if (LastUDMI) { + const TargetInstrDesc &TID = LastUDMI->getDesc(); + MachineOperand *LastUD = NULL; + for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = LastUDMI->getOperand(i); + if (!MO.isReg() || MO.getReg() != Reg) + continue; + if (!LastUD || (LastUD->isUse() && MO.isDef())) + LastUD = &MO; + if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) + return; + } + if (LastUD->isDef()) + LastUD->setIsDead(); + else { + LastUD->setIsKill(); + RegKills.set(Reg); + KillOps[Reg] = LastUD; + } + } +} + /// rewriteMBB - Keep track of which spills are available even after the /// register allocator is done with them. If possible, avid reloading vregs. -void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { - DOUT << MBB.getBasicBlock()->getName() << ":\n"; +void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM, + AvailableSpills &Spills) { + DOUT << "\n**** Local spiller rewriting MBB '" + << MBB.getBasicBlock()->getName() << ":\n"; MachineFunction &MF = *MBB.getParent(); - // Spills - Keep track of which spilled values are available in physregs so - // that we can choose to reuse the physregs instead of emitting reloads. - AvailableSpills Spills(TRI, TII); - // MaybeDeadStores - When we need to write a value back into a stack slot, // keep track of the inserted store. 
If the stack slot value is never read // (because the value was used from some available register, for example), and @@ -1049,6 +1303,8 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { std::vector KillOps; KillOps.resize(TRI->getNumRegs(), NULL); + unsigned Dist = 0; + DistanceMap.clear(); for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end(); MII != E; ) { MachineBasicBlock::iterator NextMII = MII; ++NextMII; @@ -1097,19 +1353,85 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { continue; // Split interval spilled again. unsigned Phys = VRM.getPhys(VirtReg); RegInfo->setPhysRegUsed(Phys); + + // Check if the value being restored if available. If so, it must be + // from a predecessor BB that fallthrough into this BB. We do not + // expect: + // BB1: + // r1 = load fi#1 + // ... + // = r1 + // ... # r1 not clobbered + // ... + // = load fi#1 + bool DoReMat = VRM.isReMaterialized(VirtReg); + int SSorRMId = DoReMat + ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg); + const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); + // FIXME: A temporary workaround. Don't reuse available value if it's + // not safe to move the def of the virtual register's class. e.g. + // X86::RFP* register classes. + unsigned InReg = TII->isSafeToMoveRegClassDefs(RC) ? + Spills.getSpillSlotOrReMatPhysReg(SSorRMId) : 0; + if (InReg == Phys) { + // If the value is already available in the expected register, save + // a reload / remat. + if (SSorRMId) + DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1; + else + DOUT << "Reusing SS#" << SSorRMId; + DOUT << " from physreg " + << TRI->getName(InReg) << " for vreg" + << VirtReg <<" instead of reloading into physreg " + << TRI->getName(Phys) << "\n"; + ++NumOmitted; + continue; + } else if (InReg && InReg != Phys) { + if (SSorRMId) + DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1; + else + DOUT << "Reusing SS#" << SSorRMId; + DOUT << " from physreg " + << TRI->getName(InReg) << " for vreg" + << VirtReg <<" by copying it into physreg " + << TRI->getName(Phys) << "\n"; + + // If the reloaded / remat value is available in another register, + // copy it to the desired register. + TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC); + + // This invalidates Phys. + Spills.ClobberPhysReg(Phys); + // Remember it's available. + Spills.addAvailable(SSorRMId, Phys); + + // Mark is killed. + MachineInstr *CopyMI = prior(MII); + MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg); + KillOpnd->setIsKill(); + UpdateKills(*CopyMI, RegKills, KillOps, TRI); + + DOUT << '\t' << *CopyMI; + ++NumCopified; + continue; + } + if (VRM.isReMaterialized(VirtReg)) { - ReMaterialize(MBB, MII, Phys, VirtReg, TRI, VRM); + ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM); } else { const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); - int SS = VRM.getStackSlot(VirtReg); - TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC); + TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC); MachineInstr *LoadMI = prior(MII); - VRM.addSpillSlotUse(SS, LoadMI); + VRM.addSpillSlotUse(SSorRMId, LoadMI); ++NumLoads; } + // This invalidates Phys. Spills.ClobberPhysReg(Phys); - UpdateKills(*prior(MII), RegKills, KillOps); + // Remember it's available. 
+ Spills.addAvailable(SSorRMId, Phys); + + UpdateKills(*prior(MII), RegKills, KillOps, TRI); DOUT << '\t' << *prior(MII); } } @@ -1141,7 +1463,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { SmallVector VirtUseOps; for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); - if (!MO.isRegister() || MO.getReg() == 0) + if (!MO.isReg() || MO.getReg() == 0) continue; // Ignore non-register operands. unsigned VirtReg = MO.getReg(); @@ -1154,12 +1476,15 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // We want to process implicit virtual register uses first. if (MO.isImplicit()) + // If the virtual register is implicitly defined, emit a implicit_def + // before so scavenger knows it's "defined". VirtUseOps.insert(VirtUseOps.begin(), i); else VirtUseOps.push_back(i); } // Process all of the spilled uses and all non spilled reg references. + SmallVector PotentialDeadStoreSlots; for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) { unsigned i = VirtUseOps[j]; MachineOperand &MO = MI.getOperand(i); @@ -1176,6 +1501,9 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { ReusedOperands.markClobbered(Phys); unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; MI.getOperand(i).setReg(RReg); + if (VRM.isImplicitlyDefined(VirtReg)) + BuildMI(MBB, &MI, MI.getDebugLoc(), + TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg); continue; } @@ -1217,7 +1545,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { bool CanReuse = true; int ti = TID.getOperandConstraint(i, TOI::TIED_TO); if (ti != -1 && - MI.getOperand(ti).isRegister() && + MI.getOperand(ti).isReg() && MI.getOperand(ti).getReg() == VirtReg) { // Okay, we have a two address operand. We can reuse this physreg as // long as we are allowed to clobber the value and there isn't an @@ -1262,18 +1590,21 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { if (MI.getOperand(i).isKill() && ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) { - // This was the last use and the spilled value is still available - // for reuse. That means the spill was unnecessary! - MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot]; - if (DeadStore) { - DOUT << "Removed dead store:\t" << *DeadStore; - InvalidateKills(*DeadStore, RegKills, KillOps); - VRM.RemoveMachineInstrFromMaps(DeadStore); - MBB.erase(DeadStore); - MaybeDeadStores[ReuseSlot] = NULL; - ++NumDSE; - } + + // The store of this spilled value is potentially dead, but we + // won't know for certain until we've confirmed that the re-use + // above is valid, which means waiting until the other operands + // are processed. For now we just track the spill slot, we'll + // remove it after the other operands are processed if valid. + + PotentialDeadStoreSlots.push_back(ReuseSlot); } + + // Assumes this is the last use. IsKill will be unset if reg is reused + // unless it's a two-address operand. + if (ti == -1) + MI.getOperand(i).setIsKill(); + continue; } // CanReuse @@ -1322,12 +1653,12 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC); MachineInstr *CopyMI = prior(MII); - UpdateKills(*CopyMI, RegKills, KillOps); + UpdateKills(*CopyMI, RegKills, KillOps, TRI); // This invalidates DesignatedReg. Spills.ClobberPhysReg(DesignatedReg); - Spills.addAvailable(ReuseSlot, &MI, DesignatedReg); + Spills.addAvailable(ReuseSlot, DesignatedReg); unsigned RReg = SubIdx ? 
TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg; MI.getOperand(i).setReg(RReg); @@ -1351,7 +1682,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { RegInfo->setPhysRegUsed(PhysReg); ReusedOperands.markClobbered(PhysReg); if (DoReMat) { - ReMaterialize(MBB, MII, PhysReg, VirtReg, TRI, VRM); + ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM); } else { const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC); @@ -1365,17 +1696,34 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // Any stores to this stack slot are not dead anymore. if (!DoReMat) MaybeDeadStores[SSorRMId] = NULL; - Spills.addAvailable(SSorRMId, &MI, PhysReg); + Spills.addAvailable(SSorRMId, PhysReg); // Assumes this is the last use. IsKill will be unset if reg is reused // unless it's a two-address operand. if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1) MI.getOperand(i).setIsKill(); unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; MI.getOperand(i).setReg(RReg); - UpdateKills(*prior(MII), RegKills, KillOps); + UpdateKills(*prior(MII), RegKills, KillOps, TRI); DOUT << '\t' << *prior(MII); } + // Ok - now we can remove stores that have been confirmed dead. + for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) { + // This was the last use and the spilled value is still available + // for reuse. That means the spill was unnecessary! + int PDSSlot = PotentialDeadStoreSlots[j]; + MachineInstr* DeadStore = MaybeDeadStores[PDSSlot]; + if (DeadStore) { + DOUT << "Removed dead store:\t" << *DeadStore; + InvalidateKills(*DeadStore, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(DeadStore); + MBB.erase(DeadStore); + MaybeDeadStores[PDSSlot] = NULL; + ++NumDSE; + } + } + + DOUT << '\t' << MI; @@ -1410,11 +1758,23 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { if (DestReg != InReg) { const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC); + MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg); + unsigned SubIdx = DefMO->getSubReg(); // Revisit the copy so we make sure to notice the effects of the // operation on the destreg (either needing to RA it if it's // virtual or needing to clobber any values if it's physical). NextMII = &MI; --NextMII; // backtrack to the copy. + // Propagate the sub-register index over. + if (SubIdx) { + DefMO = NextMII->findRegisterDefOperand(DestReg); + DefMO->setSubReg(SubIdx); + } + + // Mark is killed. + MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg); + KillOpnd->setIsKill(); + BackTracked = true; } else { DOUT << "Removing now-noop copy: " << MI; @@ -1422,6 +1782,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { InvalidateKill(InReg, RegKills, KillOps); } + InvalidateKills(MI, RegKills, KillOps); VRM.RemoveMachineInstrFromMaps(&MI); MBB.erase(&MI); Erased = true; @@ -1433,6 +1794,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { if (PhysReg && TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) { MBB.insert(MII, NewMIs[0]); + InvalidateKills(MI, RegKills, KillOps); VRM.RemoveMachineInstrFromMaps(&MI); MBB.erase(&MI); Erased = true; @@ -1456,20 +1818,26 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // the value and there isn't an earlier def that has already clobbered // the physreg. 
if (PhysReg && - !TII->isStoreToStackSlot(&MI, SS) && // Not profitable! - DeadStore->killsRegister(PhysReg) && - TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) { - MBB.insert(MII, NewMIs[0]); - NewStore = NewMIs[1]; - MBB.insert(MII, NewStore); - VRM.addSpillSlotUse(SS, NewStore); - VRM.RemoveMachineInstrFromMaps(&MI); - MBB.erase(&MI); - Erased = true; - --NextMII; - --NextMII; // backtrack to the unfolded instruction. - BackTracked = true; - isDead = true; + !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable! + MachineOperand *KillOpnd = + DeadStore->findRegisterUseOperand(PhysReg, true); + // Note, if the store is storing a sub-register, it's possible the + // super-register is needed below. + if (KillOpnd && !KillOpnd->getSubReg() && + TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){ + MBB.insert(MII, NewMIs[0]); + NewStore = NewMIs[1]; + MBB.insert(MII, NewStore); + VRM.addSpillSlotUse(SS, NewStore); + InvalidateKills(MI, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(&MI); + MBB.erase(&MI); + Erased = true; + --NextMII; + --NextMII; // backtrack to the unfolded instruction. + BackTracked = true; + isDead = true; + } } } @@ -1507,15 +1875,23 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) { assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) && "Src hasn't been allocated yet?"); + + if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot, + RegKills, KillOps, TRI, VRM)) { + NextMII = next(MII); + BackTracked = true; + goto ProcessNextInst; + } + // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark // this as a potentially dead store in case there is a subsequent // store into the stack slot without a read from it. MaybeDeadStores[StackSlot] = &MI; // If the stack slot value was previously available in some other - // register, change it now. Otherwise, make the register available, - // in PhysReg. - Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/); + // register, change it now. Otherwise, make the register + // available in PhysReg. + Spills.addAvailable(StackSlot, SrcReg, false/*!clobber*/); } } } @@ -1524,17 +1900,27 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // Process all of the spilled defs. for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); - if (!(MO.isRegister() && MO.getReg() && MO.isDef())) + if (!(MO.isReg() && MO.getReg() && MO.isDef())) continue; unsigned VirtReg = MO.getReg(); if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) { // Check to see if this is a noop copy. If so, eliminate the // instruction before considering the dest reg to be changed. - unsigned Src, Dst; - if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { + unsigned Src, Dst, SrcSR, DstSR; + if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) { ++NumDCE; DOUT << "Removing now-noop copy: " << MI; + SmallVector KillRegs; + InvalidateKills(MI, RegKills, KillOps, &KillRegs); + if (MO.isDead() && !KillRegs.empty()) { + // Source register or an implicit super/sub-register use is killed. + assert(KillRegs[0] == Dst || + TRI->isSubRegister(KillRegs[0], Dst) || + TRI->isSuperRegister(KillRegs[0], Dst)); + // Last def is now dead. 
+ TransferDeadness(&MBB, Dist, Src, RegKills, KillOps); + } VRM.RemoveMachineInstrFromMaps(&MI); MBB.erase(&MI); Erased = true; @@ -1555,7 +1941,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // If it is a folded reference, then it's not safe to clobber. bool Folded = FoldedSS.count(FrameIdx); // Otherwise, if it wasn't available, remember that it is now! - Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded); + Spills.addAvailable(FrameIdx, DestReg, !Folded); goto ProcessNextInst; } @@ -1593,6 +1979,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { } } + assert(PhysReg && "VR not assigned a physical register?"); RegInfo->setPhysRegUsed(PhysReg); unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; ReusedOperands.markClobbered(RReg); @@ -1607,26 +1994,29 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { // Check to see if this is a noop copy. If so, eliminate the // instruction before considering the dest reg to be changed. { - unsigned Src, Dst; - if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { + unsigned Src, Dst, SrcSR, DstSR; + if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) { ++NumDCE; DOUT << "Removing now-noop copy: " << MI; + InvalidateKills(MI, RegKills, KillOps); VRM.RemoveMachineInstrFromMaps(&MI); MBB.erase(&MI); Erased = true; - UpdateKills(*LastStore, RegKills, KillOps); + UpdateKills(*LastStore, RegKills, KillOps, TRI); goto ProcessNextInst; } } } } ProcessNextInst: + DistanceMap.insert(std::make_pair(&MI, Dist++)); if (!Erased && !BackTracked) { - for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II) - UpdateKills(*II, RegKills, KillOps); + for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II) + UpdateKills(*II, RegKills, KillOps, TRI); } MII = NextMII; } + } llvm::Spiller* llvm::createSpiller() {