X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FInlineSpiller.cpp;h=28235b0f973d36f1689af48991972868f102b11e;hb=150cef218fc3a25046c0f35761806e5a056cd430;hp=2ee67555a6c83e716bc18bc37033d38a3e84b2b9;hpb=331de11a0acc6a095b98914b5f05ff242c9d7819;p=oota-llvm.git

diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 2ee67555a6c..28235b0f973 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -12,7 +12,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "regalloc"
 #include "Spiller.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -21,8 +20,9 @@
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveRangeEdit.h"
 #include "llvm/CodeGen/LiveStackAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -38,6 +38,8 @@
 
 using namespace llvm;
 
+#define DEBUG_TYPE "regalloc"
+
 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
 STATISTIC(NumSnippets, "Number of spilled snippets");
 STATISTIC(NumSpills, "Number of spills inserted");
@@ -120,7 +122,7 @@ public:
     SibValueInfo(unsigned Reg, VNInfo *VNI)
       : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
-        SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {}
+        SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}
 
     // Returns true when a def has been found.
     bool hasDef() const { return DefByOrigPHI || DefMI; }
@@ -137,23 +139,18 @@ private:
   ~InlineSpiller() {}
 
 public:
-  InlineSpiller(MachineFunctionPass &pass,
-                MachineFunction &mf,
-                VirtRegMap &vrm)
-    : MF(mf),
-      LIS(pass.getAnalysis<LiveIntervals>()),
-      LSS(pass.getAnalysis<LiveStacks>()),
-      AA(&pass.getAnalysis<AliasAnalysis>()),
-      MDT(pass.getAnalysis<MachineDominatorTree>()),
-      Loops(pass.getAnalysis<MachineLoopInfo>()),
-      VRM(vrm),
-      MFI(*mf.getFrameInfo()),
-      MRI(mf.getRegInfo()),
-      TII(*mf.getTarget().getInstrInfo()),
-      TRI(*mf.getTarget().getRegisterInfo()),
-      MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
-
-  void spill(LiveRangeEdit &);
+  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
+      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
+        LSS(pass.getAnalysis<LiveStacks>()),
+        AA(&pass.getAnalysis<AliasAnalysis>()),
+        MDT(pass.getAnalysis<MachineDominatorTree>()),
+        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
+        MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),
+        TII(*mf.getSubtarget().getInstrInfo()),
+        TRI(*mf.getSubtarget().getRegisterInfo()),
+        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
+
+  void spill(LiveRangeEdit &) override;
 
 private:
   bool isSnippet(const LiveInterval &SnipLI);
@@ -166,7 +163,7 @@ private:
   bool isSibling(unsigned Reg);
   MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
-  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0);
+  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
   void analyzeSiblingValues();
 
   bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
@@ -178,7 +175,7 @@ private:
   bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
   bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
-                         MachineInstr *LoadMI = 0);
+                         MachineInstr *LoadMI = nullptr);
   void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
   void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
@@ -235,12 +232,13 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
   if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
     return false;
 
-  MachineInstr *UseMI = 0;
+  MachineInstr *UseMI = nullptr;
 
   // Check that all uses satisfy our criteria.
-  for (MachineRegisterInfo::reg_nodbg_iterator
-         RI = MRI.reg_nodbg_begin(SnipLI.reg);
-       MachineInstr *MI = RI.skipInstruction();) {
+  for (MachineRegisterInfo::reg_instr_nodbg_iterator
+       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
+       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
+    MachineInstr *MI = &*(RI++);
 
     // Allow copies to/from Reg.
     if (isFullCopyOf(MI, Reg))
@@ -277,8 +275,9 @@ void InlineSpiller::collectRegsToSpill() {
   if (Original == Reg)
     return;
 
-  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
-       MachineInstr *MI = RI.skipInstruction();) {
+  for (MachineRegisterInfo::reg_instr_iterator
+       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
+    MachineInstr *MI = &*(RI++);
     unsigned SnipReg = isFullCopyOf(MI, Reg);
     if (!isSibling(SnipReg))
       continue;
@@ -364,7 +363,7 @@ void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
   do {
     SVI = WorkList.pop_back_val();
     TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
-    VNI = 0;
+    VNI = nullptr;
 
     SibValueInfo &SV = SVI->second;
     if (!SV.SpillMBB)
@@ -438,7 +437,20 @@ void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
       // Also hoist spills to blocks with smaller loop depth, but make sure
       // that the new value dominates. Non-phi dependents are always
      // dominated, phis need checking.
-      if (Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth &&
+
+      const BranchProbability MarginProb(4, 5); // 80%
+      // Hoist a spill to outer loop if there are multiple dependents (it
+      // can be beneficial if more than one dependents are hoisted) or
+      // if DepSV (the hoisting source) is hotter than SV (the hoisting
+      // destination) (we add a 80% margin to bias a little towards
+      // loop depth).
+      bool HoistCondition =
+        (MBFI.getBlockFreq(DepSV.SpillMBB) >=
+         (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
+        Deps->size() > 1;
+
+      if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
+          HoistCondition &&
           (!DepSVI->first->isPHIDef() ||
            MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
         Changed = true;
@@ -476,7 +488,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
   // Check if a cached value already exists.
   SibValueMap::iterator SVI;
   bool Inserted;
-  tie(SVI, Inserted) =
+  std::tie(SVI, Inserted) =
     SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
   if (!Inserted) {
     DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
@@ -495,7 +507,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
   do {
     unsigned Reg;
     VNInfo *VNI;
-    tie(Reg, VNI) = WorkList.pop_back_val();
+    std::tie(Reg, VNI) = WorkList.pop_back_val();
     DEBUG(dbgs() << "  " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                  << ":\t");
@@ -554,7 +566,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
       for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
         VNInfo *NonPHI = NonPHIs[i];
         // Known value? Try an insertion.
-        tie(SVI, Inserted) =
+        std::tie(SVI, Inserted) =
           SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
         // Add all the PHIs as dependents of NonPHI.
         for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
@@ -578,7 +590,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
     if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
       if (isSibling(SrcReg)) {
         LiveInterval &SrcLI = LIS.getInterval(SrcReg);
-        LiveRangeQuery SrcQ(SrcLI, VNI->def);
+        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
         assert(SrcQ.valueIn() && "Copy from non-existing value");
         // Check if this COPY kills its source.
         SVI->second.KillsSource = SrcQ.isKill();
@@ -587,8 +599,8 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                      << SrcVNI->id << '@' << SrcVNI->def
                      << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
         // Known sibling source value? Try an insertion.
-        tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI,
-                                                 SibValueInfo(SrcReg, SrcVNI)));
+        std::tie(SVI, Inserted) = SibValues.insert(
+            std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
         // This is the first time we see Src, add it to the worklist.
         if (Inserted)
           WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
@@ -643,7 +655,7 @@ void InlineSpiller::analyzeSiblingValues() {
       VNInfo *VNI = *VI;
       if (VNI->isUnused())
         continue;
-      MachineInstr *DefMI = 0;
+      MachineInstr *DefMI = nullptr;
       if (!VNI->isPHIDef()) {
         DefMI = LIS.getInstructionFromIndex(VNI->def);
         assert(DefMI && "No defining instruction");
@@ -745,7 +757,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
   do {
     LiveInterval *LI;
-    tie(LI, VNI) = WorkList.pop_back_val();
+    std::tie(LI, VNI) = WorkList.pop_back_val();
     unsigned Reg = LI->reg;
     DEBUG(dbgs() << "Checking redundant spills for "
                  << VNI->id << '@' << VNI->def << " in " << *LI << '\n');
@@ -759,8 +771,10 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
     DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
 
     // Find all spills and copies of VNI.
-    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
-         MachineInstr *MI = UI.skipInstruction();) {
+    for (MachineRegisterInfo::use_instr_nodbg_iterator
+         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
+         UI != E; ) {
+      MachineInstr *MI = &*(UI++);
       if (!MI->isCopy() && !MI->mayStore())
         continue;
       SlotIndex Idx = LIS.getInstructionIndex(MI);
@@ -804,7 +818,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
   SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
   WorkList.push_back(std::make_pair(LI, VNI));
   do {
-    tie(LI, VNI) = WorkList.pop_back_val();
+    std::tie(LI, VNI) = WorkList.pop_back_val();
     if (!UsedValues.insert(VNI))
       continue;
@@ -920,10 +934,12 @@ void InlineSpiller::reMaterializeAll() {
   for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
     unsigned Reg = RegsToSpill[i];
     LiveInterval &LI = LIS.getInterval(Reg);
-    for (MachineRegisterInfo::use_nodbg_iterator
-         RI = MRI.use_nodbg_begin(Reg);
-         MachineInstr *MI = RI.skipBundle();)
+    for (MachineRegisterInfo::use_bundle_nodbg_iterator
+         RI = MRI.use_bundle_nodbg_begin(Reg), E = MRI.use_bundle_nodbg_end();
+         RI != E; ) {
+      MachineInstr *MI = &*(RI++);
       anyRemat |= reMaterializeFor(LI, MI);
+    }
   }
   if (!anyRemat)
     return;
@@ -1014,7 +1030,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
   char NextLine = '\n';
   char SlotIndent = '\t';
 
-  if (llvm::next(B) == E) {
+  if (std::next(B) == E) {
     NextLine = ' ';
     SlotIndent = ' ';
   }
@@ -1057,6 +1073,9 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   bool WasCopy = MI->isCopy();
   unsigned ImpReg = 0;
 
+  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
+                       MI->getOpcode() == TargetOpcode::STACKMAP);
+
   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
   // operands.
   SmallVector<unsigned, 8> FoldOps;
@@ -1068,7 +1087,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
       continue;
     }
     // FIXME: Teach targets to deal with subregs.
-    if (MO.getSubReg())
+    if (!SpillSubRegs && MO.getSubReg())
      return false;
     // We cannot fold a load instruction into a def.
     if (LoadMI && MO.isDef())
@@ -1095,21 +1114,20 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
         MRI.isReserved(Reg)) {
       continue;
     }
+    // Skip non-Defs, including undef uses and internal reads.
+    if (MO->isUse())
+      continue;
     MIBundleOperands::PhysRegInfo RI =
       MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
-    if (MO->readsReg()) {
-      assert(RI.Reads && "Cannot fold physreg reader");
-      continue;
-    }
     if (RI.Defines)
       continue;
     // FoldMI does not define this physreg. Remove the LI segment.
     assert(MO->isDead() && "Cannot fold physreg def");
     for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
-      if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) {
+      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
         SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
-        if (VNInfo *VNI = LI->getVNInfoAt(Idx))
-          LI->removeValNo(VNI);
+        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
+          LR->removeValNo(VNI);
       }
     }
   }
@@ -1169,12 +1187,12 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
   MachineBasicBlock &MBB = *MI->getParent();
 
   MachineInstrSpan MIS(MI);
-  TII.storeRegToStackSlot(MBB, llvm::next(MI), NewVReg, isKill, StackSlot,
+  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);
 
-  LIS.InsertMachineInstrRangeInMaps(llvm::next(MI), MIS.end());
+  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
 
-  DEBUG(dumpMachineInstrRangeWithSlotIndex(llvm::next(MI), MIS.end(), LIS,
+  DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                            "spill"));
   ++NumSpills;
 }
@@ -1185,8 +1203,10 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
   LiveInterval &OldLI = LIS.getInterval(Reg);
 
   // Iterate over instructions using Reg.
-  for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
-       MachineInstr *MI = RegI.skipBundle();) {
+  for (MachineRegisterInfo::reg_bundle_iterator
+       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
+       RegI != E; ) {
+    MachineInstr *MI = &*(RegI++);
 
     // Debug values are not allowed to affect codegen.
     if (MI->isDebugValue()) {
@@ -1311,8 +1331,10 @@ void InlineSpiller::spillAll() {
 
   // Finally delete the SnippetCopies.
   for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
-    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
-         MachineInstr *MI = RI.skipInstruction();) {
+    for (MachineRegisterInfo::reg_instr_iterator
+         RI = MRI.reg_instr_begin(RegsToSpill[i]), E = MRI.reg_instr_end();
+         RI != E; ) {
+      MachineInstr *MI = &*(RI++);
       assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
       // FIXME: Do this with a LiveRangeEdit callback.
       LIS.RemoveMachineInstrFromMaps(MI);
@@ -1333,11 +1355,11 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
   // Share a stack slot among all descendants of Original.
   Original = VRM.getOriginal(edit.getReg());
   StackSlot = VRM.getStackSlot(Original);
-  StackInt = 0;
+  StackInt = nullptr;
 
   DEBUG(dbgs() << "Inline spilling "
                << MRI.getRegClass(edit.getReg())->getName()
-               << ':' << PrintReg(edit.getReg()) << ' ' << edit.getParent()
+               << ':' << edit.getParent()
                << "\nFrom original " << PrintReg(Original) << '\n');
   assert(edit.getParent().isSpillable() &&
          "Attempting to spill already spilled value.");
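
A note on the hoisting heuristic added in the propagateSiblingValue hunk above: HoistCondition allows a spill to be hoisted out of a loop either when more than one dependent value would benefit, or when the hoisting source block is at least 80% as hot as the destination block (MarginProb(4, 5)). The snippet below is a minimal standalone sketch of that frequency comparison only; shouldHoistSpill and the integer frequencies are illustrative stand-ins for MachineBlockFrequencyInfo results, and the real check in the patch additionally requires greater loop depth and dominance.

// Standalone sketch (not LLVM code): the 80% margin test behind HoistCondition.
// Plain integers stand in for BlockFrequency values; multiplying by 4/5 mirrors
// MBFI.getBlockFreq(SV.SpillMBB) * BranchProbability(4, 5) in the patch.
#include <cstdint>
#include <iostream>

bool shouldHoistSpill(uint64_t sourceFreq, uint64_t destFreq, unsigned numDeps) {
  // Hoist when the source block is at least 80% as hot as the destination,
  // or when more than one dependent value benefits from the hoist.
  return sourceFreq * 5 >= destFreq * 4 || numDeps > 1;
}

int main() {
  std::cout << shouldHoistSpill(90, 100, 1) << '\n'; // 1: within the 80% margin
  std::cout << shouldHoistSpill(50, 100, 1) << '\n'; // 0: destination much hotter
  std::cout << shouldHoistSpill(50, 100, 3) << '\n'; // 1: several dependents benefit
}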