X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FVirtRegRewriter.cpp;h=460b508a804b64c28eb1a551ee83b8030a994eca;hb=2d147c440178044aa2af1d9c276f9c4e540fcb8e;hp=69f640ea11fef70ee9dd4414ccda0cc14743cdc6;hpb=c23197a26f34f559ea9797de51e187087c039c42;p=oota-llvm.git diff --git a/lib/CodeGen/VirtRegRewriter.cpp b/lib/CodeGen/VirtRegRewriter.cpp index 69f640ea11f..460b508a804 100644 --- a/lib/CodeGen/VirtRegRewriter.cpp +++ b/lib/CodeGen/VirtRegRewriter.cpp @@ -11,6 +11,8 @@ #include "VirtRegRewriter.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetLowering.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/STLExtras.h" @@ -46,10 +48,14 @@ RewriterOpt("rewriter", clEnumValEnd), cl::init(local)); +cl::opt +ScheduleSpills("schedule-spills", + cl::desc("Schedule spill code"), + cl::init(false)); + VirtRegRewriter::~VirtRegRewriter() {} - /// This class is intended for use with the new spilling framework only. It /// rewrites vreg def/uses to use the assigned preg, but does not insert any /// spill code. @@ -58,7 +64,12 @@ struct VISIBILITY_HIDDEN TrivialRewriter : public VirtRegRewriter { bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM, LiveIntervals* LIs) { DOUT << "********** REWRITE MACHINE CODE **********\n"; - DOUT << "********** Function: " << MF.getFunction()->getName() << '\n'; + DEBUG(errs() << "********** Function: " + << MF.getFunction()->getName() << '\n'); + DOUT << "**** Machine Instrs" + << "(NOTE! Does not include spills and reloads!) ****\n"; + DEBUG(MF.dump()); + MachineRegisterInfo *mri = &MF.getRegInfo(); bool changed = false; @@ -80,6 +91,10 @@ struct VISIBILITY_HIDDEN TrivialRewriter : public VirtRegRewriter { } } } + + + DOUT << "**** Post Machine Instrs ****\n"; + DEBUG(MF.dump()); return changed; } @@ -212,6 +227,76 @@ public: // ************************************************************************ // +// Given a location where a reload of a spilled register or a remat of +// a constant is to be inserted, attempt to find a safe location to +// insert the load at an earlier point in the basic-block, to hide +// latency of the load and to avoid address-generation interlock +// issues. +static MachineBasicBlock::iterator +ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc, + MachineBasicBlock::iterator const Begin, + unsigned PhysReg, + const TargetRegisterInfo *TRI, + bool DoReMat, + int SSorRMId, + const TargetInstrInfo *TII, + const MachineFunction &MF) +{ + if (!ScheduleSpills) + return InsertLoc; + + // Spill backscheduling is of primary interest to addresses, so + // don't do anything if the register isn't in the register class + // used for pointers. + + const TargetLowering *TL = MF.getTarget().getTargetLowering(); + + if (!TL->isTypeLegal(TL->getPointerTy())) + // Believe it or not, this is true on PIC16. + return InsertLoc; + + const TargetRegisterClass *ptrRegClass = + TL->getRegClassFor(TL->getPointerTy()); + if (!ptrRegClass->contains(PhysReg)) + return InsertLoc; + + // Scan upwards through the preceding instructions. If an instruction doesn't + // reference the stack slot or the register we're loading, we can + // backschedule the reload up past it. 
+ MachineBasicBlock::iterator NewInsertLoc = InsertLoc; + while (NewInsertLoc != Begin) { + MachineBasicBlock::iterator Prev = prior(NewInsertLoc); + for (unsigned i = 0; i < Prev->getNumOperands(); ++i) { + MachineOperand &Op = Prev->getOperand(i); + if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId) + goto stop; + } + if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 || + Prev->findRegisterDefOperand(PhysReg)) + goto stop; + for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias) + if (Prev->findRegisterUseOperandIdx(*Alias) != -1 || + Prev->findRegisterDefOperand(*Alias)) + goto stop; + NewInsertLoc = Prev; + } +stop:; + + // If we made it to the beginning of the block, turn around and move back + // down just past any existing reloads. They're likely to be reloads/remats + // for instructions earlier than what our current reload/remat is for, so + // they should be scheduled earlier. + if (NewInsertLoc == Begin) { + int FrameIdx; + while (InsertLoc != NewInsertLoc && + (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) || + TII->isTriviallyReMaterializable(NewInsertLoc))) + ++NewInsertLoc; + } + + return NewInsertLoc; +} + // ReusedOp - For each reused operand, we keep track of a bit of information, // in case we need to rollback upon processing a new operand. See comments // below. @@ -277,7 +362,8 @@ public: /// GetRegForReload - We are about to emit a reload into PhysReg. If there /// is some other operand that is using the specified register, either pick /// a new register to use, or evict the previous reload and use this reg. - unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, + unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg, + MachineFunction &MF, MachineInstr *MI, AvailableSpills &Spills, std::vector &MaybeDeadStores, SmallSet &Rejected, @@ -296,15 +382,17 @@ public: /// sees r1 is taken by t2, tries t2's reload register r0 /// sees r0 is taken by t3, tries t3's reload register r1 /// sees r1 is taken by t2, tries t2's reload register r0 ... - unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, + unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI, AvailableSpills &Spills, std::vector &MaybeDeadStores, BitVector &RegKills, std::vector &KillOps, VirtRegMap &VRM) { SmallSet Rejected; - return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected, - RegKills, KillOps, VRM); + MachineFunction &MF = *MI->getParent()->getParent(); + const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg); + return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores, + Rejected, RegKills, KillOps, VRM); } }; @@ -490,7 +578,14 @@ static void ReMaterialize(MachineBasicBlock &MBB, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, VirtRegMap &VRM) { - TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); + MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg); +#ifndef NDEBUG + const TargetInstrDesc &TID = ReMatDefMI->getDesc(); + assert(TID.getNumDefs() == 1 && + "Don't know how to remat instructions that define > 1 values!"); +#endif + TII->reMaterialize(MBB, MII, DestReg, + ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI); MachineInstr *NewMI = prior(MII); for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { MachineOperand &MO = NewMI->getOperand(i); @@ -651,15 +746,17 @@ void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) { /// GetRegForReload - We are about to emit a reload into PhysReg. 
If there /// is some other operand that is using the specified register, either pick /// a new register to use, or evict the previous reload and use this reg. -unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI, - AvailableSpills &Spills, +unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC, + unsigned PhysReg, + MachineFunction &MF, + MachineInstr *MI, AvailableSpills &Spills, std::vector &MaybeDeadStores, SmallSet &Rejected, BitVector &RegKills, std::vector &KillOps, VirtRegMap &VRM) { - const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget() - .getInstrInfo(); + const TargetInstrInfo* TII = MF.getTarget().getInstrInfo(); + const TargetRegisterInfo *TRI = Spills.getRegInfo(); if (Reuses.empty()) return PhysReg; // This is most often empty. @@ -671,18 +768,18 @@ unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI, // considered and subsequently rejected because it has also been reused // by another operand. if (Op.PhysRegReused == PhysReg && - Rejected.count(Op.AssignedPhysReg) == 0) { + Rejected.count(Op.AssignedPhysReg) == 0 && + RC->contains(Op.AssignedPhysReg)) { // Yup, use the reload register that we didn't use before. unsigned NewReg = Op.AssignedPhysReg; Rejected.insert(PhysReg); - return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected, + return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores, Rejected, RegKills, KillOps, VRM); } else { // Otherwise, we might also have a problem if a previously reused - // value aliases the new register. If so, codegen the previous reload + // value aliases the new register. If so, codegen the previous reload // and use this one. unsigned PRRU = Op.PhysRegReused; - const TargetRegisterInfo *TRI = Spills.getRegInfo(); if (TRI->areAliases(PRRU, PhysReg)) { // Okay, we found out that an alias of a reused register // was used. This isn't good because it means we have @@ -700,17 +797,26 @@ unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI, // slot that we were supposed to in the first place. However, that // register could hold a reuse. Check to see if it conflicts or // would prefer us to use a different register. - unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg, - MI, Spills, MaybeDeadStores, - Rejected, RegKills, KillOps, VRM); - - MachineBasicBlock::iterator MII = MI; - if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { - ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM); - } else { - TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg, + unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg, + MF, MI, Spills, MaybeDeadStores, + Rejected, RegKills, KillOps, VRM); + + bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT; + int SSorRMId = DoReMat + ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat; + + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, + DoReMat, SSorRMId, TII, MF); + + if (DoReMat) { + ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII, + TRI, VRM); + } else { + TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg, NewOp.StackSlotOrReMat, AliasRC); - MachineInstr *LoadMI = prior(MII); + MachineInstr *LoadMI = prior(InsertLoc); VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI); // Any stores to this stack slot are not dead anymore. 
MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; @@ -725,9 +831,8 @@ unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI, MI->getOperand(NewOp.Operand).setSubReg(0); Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg); - --MII; - UpdateKills(*MII, TRI, RegKills, KillOps); - DOUT << '\t' << *MII; + UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps); + DOUT << '\t' << *prior(InsertLoc); DOUT << "Reuse undone!\n"; --NumReused; @@ -871,8 +976,8 @@ public: TRI = MF.getTarget().getRegisterInfo(); TII = MF.getTarget().getInstrInfo(); AllocatableRegs = TRI->getAllocatableSet(MF); - DOUT << "\n**** Local spiller rewriting function '" - << MF.getFunction()->getName() << "':\n"; + DEBUG(errs() << "\n**** Local spiller rewriting function '" + << MF.getFunction()->getName() << "':\n"); DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)" " ****\n"; DEBUG(MF.dump()); @@ -989,6 +1094,10 @@ private: if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM)) return false; + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF); + // Load from SS to the spare physical register. TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC); // This invalidates Phys. @@ -1378,9 +1487,7 @@ private: if (LastUD->isDef()) { // If the instruction has no side effect, delete it and propagate // backward further. Otherwise, mark is dead and we are done. - const TargetInstrDesc &TID = LastUDMI->getDesc(); - if (TID.mayStore() || TID.isCall() || TID.isTerminator() || - TID.hasUnmodeledSideEffects()) { + if (!TII->isDeadInstruction(LastUDMI)) { LastUD->setIsDead(); break; } @@ -1402,8 +1509,8 @@ private: AvailableSpills &Spills, BitVector &RegKills, std::vector &KillOps) { - DOUT << "\n**** Local spiller rewriting MBB '" - << MBB.getBasicBlock()->getName() << "':\n"; + DEBUG(errs() << "\n**** Local spiller rewriting MBB '" + << MBB.getBasicBlock()->getName() << "':\n"); MachineFunction &MF = *MBB.getParent(); @@ -1457,8 +1564,15 @@ private: TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC); MachineInstr *StoreMI = prior(MII); VRM.addSpillSlotUse(SS, StoreMI); - TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC); - MachineInstr *LoadMI = next(MII); + + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(next(MII), MBB.begin(), PhysReg, TRI, false, + SS, TII, MF); + + TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC); + + MachineInstr *LoadMI = prior(InsertLoc); VRM.addSpillSlotUse(SS, LoadMI); ++NumPSpills; } @@ -1515,7 +1629,13 @@ private: // If the reloaded / remat value is available in another register, // copy it to the desired register. - TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC); + + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat, + SSorRMId, TII, MF); + + TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC); // This invalidates Phys. Spills.ClobberPhysReg(Phys); @@ -1523,7 +1643,7 @@ private: Spills.addAvailable(SSorRMId, Phys); // Mark is killed. - MachineInstr *CopyMI = prior(MII); + MachineInstr *CopyMI = prior(InsertLoc); MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg); KillOpnd->setIsKill(); UpdateKills(*CopyMI, TRI, RegKills, KillOps); @@ -1533,12 +1653,17 @@ private: continue; } + // Back-schedule reloads and remats. 
+ MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat, + SSorRMId, TII, MF); + if (VRM.isReMaterialized(VirtReg)) { - ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM); + ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM); } else { const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); - TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC); - MachineInstr *LoadMI = prior(MII); + TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC); + MachineInstr *LoadMI = prior(InsertLoc); VRM.addSpillSlotUse(SSorRMId, LoadMI); ++NumLoads; } @@ -1548,7 +1673,7 @@ private: // Remember it's available. Spills.addAvailable(SSorRMId, Phys); - UpdateKills(*prior(MII), TRI, RegKills, KillOps); + UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps); DOUT << '\t' << *prior(MII); } } @@ -1760,8 +1885,9 @@ private: // available. If this occurs, use the register indicated by the // reuser. if (ReusedOperands.hasReuses()) - DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI, - Spills, MaybeDeadStores, RegKills, KillOps, VRM); + DesignatedReg = ReusedOperands.GetRegForReload(VirtReg, + DesignatedReg, &MI, + Spills, MaybeDeadStores, RegKills, KillOps, VRM); // If the mapped designated register is actually the physreg we have // incoming, we don't need to inserted a dead copy. @@ -1785,9 +1911,15 @@ private: const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); RegInfo->setPhysRegUsed(DesignatedReg); ReusedOperands.markClobbered(DesignatedReg); - TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC); - MachineInstr *CopyMI = prior(MII); + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat, + SSorRMId, TII, MF); + + TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC); + + MachineInstr *CopyMI = prior(InsertLoc); UpdateKills(*CopyMI, TRI, RegKills, KillOps); // This invalidates DesignatedReg. @@ -1812,20 +1944,25 @@ private: // available. If this occurs, use the register indicated by the // reuser. if (ReusedOperands.hasReuses()) - PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, - Spills, MaybeDeadStores, RegKills, KillOps, VRM); + PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI, + Spills, MaybeDeadStores, RegKills, KillOps, VRM); RegInfo->setPhysRegUsed(PhysReg); ReusedOperands.markClobbered(PhysReg); if (AvoidReload) ++NumAvoided; else { + // Back-schedule reloads and remats. + MachineBasicBlock::iterator InsertLoc = + ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat, + SSorRMId, TII, MF); + if (DoReMat) { - ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM); + ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM); } else { const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); - TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC); - MachineInstr *LoadMI = prior(MII); + TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC); + MachineInstr *LoadMI = prior(InsertLoc); VRM.addSpillSlotUse(SSorRMId, LoadMI); ++NumLoads; } @@ -1844,8 +1981,8 @@ private: KilledMIRegs.insert(VirtReg); } - UpdateKills(*prior(MII), TRI, RegKills, KillOps); - DOUT << '\t' << *prior(MII); + UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps); + DOUT << '\t' << *prior(InsertLoc); } unsigned RReg = SubIdx ? 
TRI->getSubReg(PhysReg, SubIdx) : PhysReg; MI.getOperand(i).setReg(RReg); @@ -2127,8 +2264,8 @@ private: if (ReusedOperands.isClobbered(PhysReg)) { // Another def has taken the assigned physreg. It must have been a // use&def which got it due to reuse. Undo the reuse! - PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, - Spills, MaybeDeadStores, RegKills, KillOps, VRM); + PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI, + Spills, MaybeDeadStores, RegKills, KillOps, VRM); } } @@ -2163,7 +2300,15 @@ private: } } ProcessNextInst: - DistanceMap.insert(std::make_pair(&MI, Dist++)); + // Delete dead instructions without side effects. + if (!Erased && !BackTracked && TII->isDeadInstruction(&MI)) { + InvalidateKills(MI, TRI, RegKills, KillOps); + VRM.RemoveMachineInstrFromMaps(&MI); + MBB.erase(&MI); + Erased = true; + } + if (!Erased) + DistanceMap.insert(std::make_pair(&MI, Dist++)); if (!Erased && !BackTracked) { for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II) UpdateKills(*II, TRI, RegKills, KillOps);
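
The heart of this change is the new ComputeReloadLoc helper. When the new -schedule-spills option is enabled (it defaults to off), and only for registers in the class used for pointer values (the helper bails out otherwise, including on targets such as PIC16 where the pointer type is not legal), a reload or remat is hoisted upward past any preceding instruction that neither references the spill slot being reloaded nor reads or writes the destination register or one of its aliases, hiding load latency and avoiding address-generation interlocks. If the scan reaches the top of the block, the insertion point is then slid back down past reloads already parked there. The sketch below mirrors just the upward scan using simplified stand-in types; Instr, regsAlias, and computeReloadIdx are illustrative inventions, not the LLVM API, and the slide-back-down refinement is omitted.

// Standalone sketch of the upward scan in ComputeReloadLoc.  The types and
// helpers here (Instr, regsAlias, computeReloadIdx) are simplified stand-ins
// for illustration, not the LLVM API.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Instr {
  int FrameIdx;                // stack slot referenced, or -1 if none
  std::vector<unsigned> Regs;  // physical registers read or written
};

// Placeholder for TargetRegisterInfo::getAliasSet(): treat two registers as
// aliasing only when they are identical, so the sketch stays self-contained.
static bool regsAlias(unsigned A, unsigned B) { return A == B; }

// Walk upward from InsertIdx toward the start of the block.  The reload may be
// hoisted above any instruction that neither references the spill slot being
// reloaded nor touches (an alias of) the register being reloaded into.
static std::size_t computeReloadIdx(const std::vector<Instr> &Block,
                                    std::size_t InsertIdx, unsigned PhysReg,
                                    int StackSlot) {
  std::size_t NewIdx = InsertIdx;
  while (NewIdx != 0) {
    const Instr &Prev = Block[NewIdx - 1];
    if (Prev.FrameIdx == StackSlot)
      break;                      // hoisting would reorder stack-slot accesses
    bool Conflict = false;
    for (unsigned R : Prev.Regs)
      if (regsAlias(R, PhysReg)) {
        Conflict = true;          // Prev reads or writes PhysReg (or an alias)
        break;
      }
    if (Conflict)
      break;
    --NewIdx;                     // safe: schedule the reload above Prev
  }
  return NewIdx;
}

int main() {
  // I0 defines reg 7; I1 and I2 only touch regs 3 and 4.
  std::vector<Instr> Block = {{-1, {7u}}, {-1, {3u}}, {-1, {4u}}};
  // A reload of stack slot 2 into reg 7, requested at the end of the block,
  // can be back-scheduled to index 1, just below the definition of reg 7.
  std::printf("reload goes at index %zu\n",
              computeReloadIdx(Block, Block.size(), 7, 2));
  return 0;
}

Since the cl::opt added by this patch is registered without cl::Hidden, the behaviour would presumably be toggled from the llc command line with -schedule-spills; with the option left at its default the rewriter keeps the old insertion points.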