#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
+#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
+#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
// context
MachineFunction *MF;
- BitVector ReservedRegs;
// analyses
SlotIndexes *Indexes;
MachineLoopRanges *LoopRanges;
EdgeBundles *Bundles;
SpillPlacement *SpillPlacer;
+ LiveDebugVariables *DebugVars;
// state
std::auto_ptr<Spiller> SpillerInstance;
std::priority_queue<std::pair<unsigned, unsigned> > Queue;
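+  // NextCascade - The next unused cascade number for eviction loop
+  // prevention; see canEvictInterference(). A live range keeps Cascade 0
+  // until it is first involved in an eviction.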
+ unsigned NextCascade;
// Live ranges pass through a number of stages as we try to allocate them.
// Some of the stages may also create new live ranges:
RS_New, ///< Never seen before.
RS_First, ///< First time in the queue.
RS_Second, ///< Second time in the queue.
- RS_Region, ///< Produced by region splitting.
- RS_Block, ///< Produced by per-block splitting.
+ RS_Global, ///< Produced by global splitting.
RS_Local, ///< Produced by local splitting.
RS_Spill ///< Produced by spilling.
};
- IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
+ static const char *const StageName[];
+
+ // RegInfo - Keep additional information about each live range.
+ struct RegInfo {
+ LiveRangeStage Stage;
+
+ // Cascade - Eviction loop prevention. See canEvictInterference().
+ unsigned Cascade;
+
+ RegInfo() : Stage(RS_New), Cascade(0) {}
+ };
+
+ IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
LiveRangeStage getStage(const LiveInterval &VirtReg) const {
- return LiveRangeStage(LRStage[VirtReg.reg]);
+ return ExtraRegInfo[VirtReg.reg].Stage;
+ }
+
+ void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
+ ExtraRegInfo.resize(MRI->getNumVirtRegs());
+ ExtraRegInfo[VirtReg.reg].Stage = Stage;
}
template<typename Iterator>
void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
- LRStage.resize(MRI->getNumVirtRegs());
+ ExtraRegInfo.resize(MRI->getNumVirtRegs());
for (;Begin != End; ++Begin) {
unsigned Reg = (*Begin)->reg;
- if (LRStage[Reg] == RS_New)
- LRStage[Reg] = NewStage;
+ if (ExtraRegInfo[Reg].Stage == RS_New)
+ ExtraRegInfo[Reg].Stage = NewStage;
}
}
/// Cached per-block interference maps
InterferenceCache IntfCache;
- /// All basic blocks where the current register is live.
+ /// All basic blocks where the current register has uses.
SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
/// Global live range splitting candidate info.
struct GlobalSplitCandidate {
unsigned PhysReg;
BitVector LiveBundles;
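+    // ActiveBlocks - Live-through blocks that growRegion() has added to
+    // SpillPlacer for this candidate.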
+ SmallVector<unsigned, 8> ActiveBlocks;
+
+ void reset(unsigned Reg) {
+ PhysReg = Reg;
+ LiveBundles.clear();
+ ActiveBlocks.clear();
+ }
};
  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
SmallVector<GlobalSplitCandidate, 32> GlobalCand;
- /// For every instruction in SA->UseSlots, store the previous non-copy
- /// instruction.
- SmallVector<SlotIndex, 8> PrevSlot;
-
public:
RAGreedy();
void LRE_WillShrinkVirtReg(unsigned);
void LRE_DidCloneVirtReg(unsigned, unsigned);
- float calcSplitConstraints(unsigned);
- float calcGlobalSplitCost(const BitVector&);
- void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
+ float calcSpillCost();
+ bool addSplitConstraints(InterferenceCache::Cursor, float&);
+ void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
+ void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
+ float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
+ void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
SmallVectorImpl<LiveInterval*>&);
void calcGapWeights(unsigned, SmallVectorImpl<float>&);
- SlotIndex getPrevMappedIndex(const MachineInstr*);
- void calcPrevSlots();
- unsigned nextSplitPoint(unsigned);
+ bool canEvict(LiveInterval &A, LiveInterval &B);
bool canEvictInterference(LiveInterval&, unsigned, float&);
+ unsigned tryAssign(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
unsigned tryEvict(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<LiveInterval*>&);
+ SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
char RAGreedy::ID = 0;
+#ifndef NDEBUG
+const char *const RAGreedy::StageName[] = {
+ "RS_New",
+ "RS_First",
+ "RS_Second",
+ "RS_Global",
+ "RS_Local",
+ "RS_Spill"
+};
+#endif
+
+// Hysteresis to use when comparing floats.
+// This helps stabilize decisions based on float comparisons.
+const float Hysteresis = 0.98f;
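+// For example, tryRegionSplit() records BestCost = Hysteresis * Cost, so a
+// later candidate must be roughly 2% cheaper before it displaces the current
+// best, and tiny rounding differences cannot flip the decision back and forth.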
+
+
FunctionPass* llvm::createGreedyRegisterAllocator() {
return new RAGreedy();
}
-RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
+RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
+ initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
- initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+ initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
AU.addRequired<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveDebugVariables>();
+ AU.addPreserved<LiveDebugVariables>();
if (StrongPHIElim)
AU.addRequiredID(StrongPHIEliminationID);
AU.addRequiredTransitive<RegisterCoalescer>();
// LRE may clone a virtual register because dead code elimination causes it to
// be split into connected components. Ensure that the new register gets the
// same stage as the parent.
- LRStage.grow(New);
- LRStage[New] = LRStage[Old];
+ ExtraRegInfo.grow(New);
+ ExtraRegInfo[New] = ExtraRegInfo[Old];
}
void RAGreedy::releaseMemory() {
SpillerInstance.reset(0);
- LRStage.clear();
+ ExtraRegInfo.clear();
+ GlobalCand.clear();
RegAllocBase::releaseMemory();
}
"Can only enqueue virtual registers");
unsigned Prio;
- LRStage.grow(Reg);
- if (LRStage[Reg] == RS_New)
- LRStage[Reg] = RS_First;
+ ExtraRegInfo.grow(Reg);
+ if (ExtraRegInfo[Reg].Stage == RS_New)
+ ExtraRegInfo[Reg].Stage = RS_First;
- if (LRStage[Reg] == RS_Second)
+ if (ExtraRegInfo[Reg].Stage == RS_Second)
// Unsplit ranges that couldn't be allocated immediately are deferred until
// everything else has been allocated. Long ranges are allocated last so
// they are split against realistic interference.
return LI;
}
+
+//===----------------------------------------------------------------------===//
+// Direct Assignment
+//===----------------------------------------------------------------------===//
+
+/// tryAssign - Try to assign VirtReg to an available register.
+unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ Order.rewind();
+ unsigned PhysReg;
+ while ((PhysReg = Order.next()))
+ if (!checkPhysRegInterference(VirtReg, PhysReg))
+ break;
+ if (!PhysReg || Order.isHint(PhysReg))
+ return PhysReg;
+
+ // PhysReg is available. Try to evict interference from a cheaper alternative.
+ unsigned Cost = TRI->getCostPerUse(PhysReg);
+
+ // Most registers have 0 additional cost.
+ if (!Cost)
+ return PhysReg;
+
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
+ << '\n');
+ unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
+ return CheapReg ? CheapReg : PhysReg;
+}
+
+
//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//
+/// canEvict - determine if A can evict the assigned live range B. The eviction
+/// policy defined by this function together with the allocation order defined
+/// by enqueue() decides which registers ultimately end up being split and
+/// spilled.
+///
+/// Cascade numbers are used to prevent infinite loops in case the relation
+/// defined by this function is cyclic.
+bool RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
+ return A.weight > B.weight;
+}
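+// A strict weight comparison like the one above cannot loop on its own; the
+// cascade numbers in canEvictInterference() guard against eviction policies
+// that are not acyclic.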
+
/// canEvictInterference - Return true if all interferences between VirtReg and PhysReg can
-/// be evicted. Set maxWeight to the maximal spill weight of an interference.
+/// be evicted.
+/// Return false if any interference is heavier than MaxWeight.
+/// On return, set MaxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
float &MaxWeight) {
+ // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
+ // involved in an eviction before. If a cascade number was assigned, deny
+ // evicting anything with the same or a newer cascade number. This prevents
+ // infinite eviction loops.
+ //
+ // This works out so a register without a cascade number is allowed to evict
+ // anything, and it can be evicted by anything.
+ unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+ if (!Cascade)
+ Cascade = NextCascade;
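+  // For example, a VirtReg with no cascade number yet is treated as
+  // NextCascade here, which is newer than any assigned cascade, so the checks
+  // below never block it. Its stored cascade stays 0 until tryEvict() evicts
+  // on its behalf, so everything else may still evict it in the meantime.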
+
float Weight = 0;
for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
- // If there is 10 or more interferences, chances are one is smaller.
- if (Q.collectInterferingVRegs(10) >= 10)
+    // If there are 10 or more interferences, chances are one is heavier.
+ if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
return false;
- // Check if any interfering live range is heavier than VirtReg.
- for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
- LiveInterval *Intf = Q.interferingVRegs()[i];
+ // Check if any interfering live range is heavier than MaxWeight.
+ for (unsigned i = Q.interferingVRegs().size(); i; --i) {
+ LiveInterval *Intf = Q.interferingVRegs()[i - 1];
if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
return false;
- if (Intf->weight >= VirtReg.weight)
+ if (Cascade <= ExtraRegInfo[Intf->reg].Cascade)
+ return false;
+ if (Intf->weight >= MaxWeight)
+ return false;
+ if (!canEvict(VirtReg, *Intf))
return false;
Weight = std::max(Weight, Intf->weight);
}
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
AllocationOrder &Order,
- SmallVectorImpl<LiveInterval*> &NewVRegs){
+ SmallVectorImpl<LiveInterval*> &NewVRegs,
+ unsigned CostPerUseLimit) {
NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
// Keep track of the lightest single interference seen so far.
- float BestWeight = 0;
+ float BestWeight = HUGE_VALF;
unsigned BestPhys = 0;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
- float Weight = 0;
+ if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
+ continue;
+ // The first use of a register in a function has cost 1.
+ if (CostPerUseLimit == 1 && !MRI->isPhysRegUsed(PhysReg))
+ continue;
+
+ float Weight = BestWeight;
if (!canEvictInterference(VirtReg, PhysReg, Weight))
continue;
// This is an eviction candidate.
- DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
<< Weight << '\n');
if (BestPhys && Weight >= BestWeight)
continue;
if (!BestPhys)
return 0;
- DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
+ // We will evict interference. Make sure that VirtReg has a cascade number,
+ // and assign that cascade number to every evicted register. These live
+  // ranges can then only be evicted by a newer cascade, preventing infinite
+ // loops.
+ unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+ if (!Cascade)
+ Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
+
+ DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI)
+ << " interference: Cascade " << Cascade << '\n');
for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
LiveInterval *Intf = Q.interferingVRegs()[i];
unassign(*Intf, VRM->getPhys(Intf->reg));
+ assert(ExtraRegInfo[Intf->reg].Cascade < Cascade &&
+ "Cannot decrease cascade number, illegal eviction");
+ ExtraRegInfo[Intf->reg].Cascade = Cascade;
++NumEvicted;
NewVRegs.push_back(Intf);
}
// Region Splitting
//===----------------------------------------------------------------------===//
-/// calcSplitConstraints - Fill out the SplitConstraints vector based on the
-/// interference pattern in Physreg and its aliases. Return the static cost of
-/// this split, assuming that all preferences in SplitConstraints are met.
-float RAGreedy::calcSplitConstraints(unsigned PhysReg) {
- InterferenceCache::Cursor Intf(IntfCache, PhysReg);
+/// addSplitConstraints - Fill out the SplitConstraints vector based on the
+/// interference pattern in PhysReg and its aliases. Add the constraints to
+/// SpillPlacement and return the static cost of this split in Cost, assuming
+/// that all preferences in SplitConstraints are met.
+/// Return false if there are no bundles with positive bias.
+bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
+ float &Cost) {
+ ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
// Reset interference dependent info.
- SplitConstraints.resize(SA->LiveBlocks.size());
+ SplitConstraints.resize(UseBlocks.size());
float StaticCost = 0;
- for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
- SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ for (unsigned i = 0; i != UseBlocks.size(); ++i) {
+ const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
BC.Number = BI.MBB->getNumber();
Intf.moveToBlock(BC.Number);
- BC.Entry = (BI.Uses && BI.LiveIn) ?
- SpillPlacement::PrefReg : SpillPlacement::DontCare;
- BC.Exit = (BI.Uses && BI.LiveOut) ?
- SpillPlacement::PrefReg : SpillPlacement::DontCare;
+ BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
+ BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
if (!Intf.hasInterference())
continue;
// Interference for the live-in value.
if (BI.LiveIn) {
if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
- BC.Entry = SpillPlacement::MustSpill, Ins += BI.Uses;
- else if (!BI.Uses)
- BC.Entry = SpillPlacement::PrefSpill;
+ BC.Entry = SpillPlacement::MustSpill, ++Ins;
else if (Intf.first() < BI.FirstUse)
BC.Entry = SpillPlacement::PrefSpill, ++Ins;
- else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
+ else if (Intf.first() < BI.LastUse)
++Ins;
}
// Interference for the live-out value.
if (BI.LiveOut) {
if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
- BC.Exit = SpillPlacement::MustSpill, Ins += BI.Uses;
- else if (!BI.Uses)
- BC.Exit = SpillPlacement::PrefSpill;
+ BC.Exit = SpillPlacement::MustSpill, ++Ins;
else if (Intf.last() > BI.LastUse)
BC.Exit = SpillPlacement::PrefSpill, ++Ins;
- else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
+ else if (Intf.last() > BI.FirstUse)
++Ins;
}
if (Ins)
StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
- return StaticCost;
+ Cost = StaticCost;
+
+ // Add constraints for use-blocks. Note that these are the only constraints
+  // that may add a positive bias; it is downhill from here.
+ SpillPlacer->addConstraints(SplitConstraints);
+ return SpillPlacer->scanActiveBundles();
+}
+
+
+/// addThroughConstraints - Add constraints and links to SpillPlacer from the
+/// live-through blocks in Blocks.
+void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
+ ArrayRef<unsigned> Blocks) {
+ const unsigned GroupSize = 8;
+ SpillPlacement::BlockConstraint BCS[GroupSize];
+ unsigned TBS[GroupSize];
+ unsigned B = 0, T = 0;
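+  // Blocks with interference get a full BlockConstraint in BCS; blocks
+  // without interference only need a link and are collected in TBS. Both
+  // arrays are flushed to SpillPlacer in batches of GroupSize.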
+
+ for (unsigned i = 0; i != Blocks.size(); ++i) {
+ unsigned Number = Blocks[i];
+ Intf.moveToBlock(Number);
+
+ if (!Intf.hasInterference()) {
+ assert(T < GroupSize && "Array overflow");
+ TBS[T] = Number;
+ if (++T == GroupSize) {
+ SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
+ T = 0;
+ }
+ continue;
+ }
+
+ assert(B < GroupSize && "Array overflow");
+ BCS[B].Number = Number;
+
+ // Interference for the live-in value.
+ if (Intf.first() <= Indexes->getMBBStartIdx(Number))
+ BCS[B].Entry = SpillPlacement::MustSpill;
+ else
+ BCS[B].Entry = SpillPlacement::PrefSpill;
+
+ // Interference for the live-out value.
+ if (Intf.last() >= SA->getLastSplitPoint(Number))
+ BCS[B].Exit = SpillPlacement::MustSpill;
+ else
+ BCS[B].Exit = SpillPlacement::PrefSpill;
+
+ if (++B == GroupSize) {
+ ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
+ SpillPlacer->addConstraints(Array);
+ B = 0;
+ }
+ }
+
+ ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
+ SpillPlacer->addConstraints(Array);
+ SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
+}
+
+void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
+ InterferenceCache::Cursor Intf) {
+ // Keep track of through blocks that have not been added to SpillPlacer.
+ BitVector Todo = SA->getThroughBlocks();
+ SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
+ unsigned AddedTo = 0;
+#ifndef NDEBUG
+ unsigned Visited = 0;
+#endif
+
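+  // Iterate to a fixed point: each round feeds the constraints of newly
+  // discovered through blocks to SpillPlacer, which may drive more bundles
+  // positive and expose yet more periphery blocks.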
+ for (;;) {
+ ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
+ if (NewBundles.empty())
+ break;
+    // Find new through blocks in the periphery of the newly positive bundles.
+ for (int i = 0, e = NewBundles.size(); i != e; ++i) {
+ unsigned Bundle = NewBundles[i];
+ // Look at all blocks connected to Bundle in the full graph.
+ ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
+ for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
+ I != E; ++I) {
+ unsigned Block = *I;
+ if (!Todo.test(Block))
+ continue;
+ Todo.reset(Block);
+ // This is a new through block. Add it to SpillPlacer later.
+ ActiveBlocks.push_back(Block);
+#ifndef NDEBUG
+ ++Visited;
+#endif
+ }
+ }
+ // Any new blocks to add?
+ if (ActiveBlocks.size() > AddedTo) {
+ ArrayRef<unsigned> Add(&ActiveBlocks[AddedTo],
+ ActiveBlocks.size() - AddedTo);
+ addThroughConstraints(Intf, Add);
+ AddedTo = ActiveBlocks.size();
+ }
+ // Perhaps iterating can enable more bundles?
+ SpillPlacer->iterate();
+ }
+ DEBUG(dbgs() << ", v=" << Visited);
}
+/// calcSpillCost - Compute how expensive it would be to split the live range in
+/// SA around all use blocks instead of forming bundle regions.
+float RAGreedy::calcSpillCost() {
+ float Cost = 0;
+ const LiveInterval &LI = SA->getParent();
+ ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
+ for (unsigned i = 0; i != UseBlocks.size(); ++i) {
+ const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
+ unsigned Number = BI.MBB->getNumber();
+ // We normally only need one spill instruction - a load or a store.
+ Cost += SpillPlacer->getBlockFrequency(Number);
+
+ // Unless the value is redefined in the block.
+ if (BI.LiveIn && BI.LiveOut) {
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(Number);
+ LiveInterval::const_iterator I = LI.find(Start);
+ assert(I != LI.end() && "Expected live-in value");
+ // Is there a different live-out value? If so, we need an extra spill
+ // instruction.
+ if (I->end < Stop)
+ Cost += SpillPlacer->getBlockFrequency(Number);
+ }
+ }
+ return Cost;
+}
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
-float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
+float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
+ InterferenceCache::Cursor Intf) {
float GlobalCost = 0;
- for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
- SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ const BitVector &LiveBundles = Cand.LiveBundles;
+ ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
+ for (unsigned i = 0; i != UseBlocks.size(); ++i) {
+ const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
unsigned Ins = 0;
- if (!BI.Uses)
- Ins += RegIn != RegOut;
- else {
- if (BI.LiveIn)
- Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
- if (BI.LiveOut)
- Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
- }
+ if (BI.LiveIn)
+ Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
+ if (BI.LiveOut)
+ Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
if (Ins)
GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
}
+
+ for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
+ unsigned Number = Cand.ActiveBlocks[i];
+ bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
+ bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
+ if (!RegIn && !RegOut)
+ continue;
+ if (RegIn && RegOut) {
+ // We need double spill code if this block has interference.
+ Intf.moveToBlock(Number);
+ if (Intf.hasInterference())
+ GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
+ continue;
+ }
+    // Live-in / stack-out, or stack-in / live-out.
+ GlobalCost += SpillPlacer->getBlockFrequency(Number);
+ }
return GlobalCost;
}
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
-void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
- const BitVector &LiveBundles,
+void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
+ GlobalSplitCandidate &Cand,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ const BitVector &LiveBundles = Cand.LiveBundles;
+
DEBUG({
- dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
+ dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
<< " with bundles";
for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
- InterferenceCache::Cursor Intf(IntfCache, PhysReg);
+ InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
SE->reset(LREdit);
// Create the main cross-block interval.
- SE->openIntv();
-
- // First add all defs that are live out of a block.
- for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
- SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
- bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
- bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
-
- // Should the register be live out?
- if (!BI.LiveOut || !RegOut)
+ const unsigned MainIntv = SE->openIntv();
+
+ // First handle all the blocks with uses.
+ ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
+ for (unsigned i = 0; i != UseBlocks.size(); ++i) {
+ const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
+ bool RegIn = BI.LiveIn &&
+ LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
+ bool RegOut = BI.LiveOut &&
+ LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
+
+ // Create separate intervals for isolated blocks with multiple uses.
+ //
+ // |---o---o---| Enter and leave on the stack.
+ // ____-----____ Create local interval for uses.
+ //
+ // | o---o---| Defined in block, leave on stack.
+ // -----____ Create local interval for uses.
+ //
+ // |---o---x | Enter on stack, killed in block.
+ // ____----- Create local interval for uses.
+ //
+ if (!RegIn && !RegOut) {
+ DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
+ if (!BI.isOneInstr()) {
+ SE->splitSingleBlock(BI);
+ SE->selectIntv(MainIntv);
+ }
continue;
+ }
SlotIndex Start, Stop;
tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
Intf.moveToBlock(BI.MBB->getNumber());
- DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
- << Bundles->getBundle(BI.MBB->getNumber(), 1)
+ DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
+ << (BI.LiveIn ? (RegIn ? " => " : " -> ") : " ")
+ << "BB#" << BI.MBB->getNumber()
+ << (BI.LiveOut ? (RegOut ? " => " : " -> ") : " ")
+ << " EB#" << Bundles->getBundle(BI.MBB->getNumber(), 1)
<< " [" << Start << ';'
<< SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
+ << ") uses [" << BI.FirstUse << ';' << BI.LastUse
<< ") intf [" << Intf.first() << ';' << Intf.last() << ')');
// The interference interval should either be invalid or overlap MBB.
assert((!Intf.hasInterference() || Intf.last() > Start)
&& "Bad interference");
- // Check interference leaving the block.
+ // We are now ready to decide where to split in the current block. There
+ // are many variables guiding the decision:
+ //
+ // - RegIn / RegOut: The global splitting algorithm's decisions for our
+ // ingoing and outgoing bundles.
+ //
+  // - BI.LiveIn / BI.LiveOut: Is the live range live-in and/or live-out
+  //   from this block.
+  //
+  // - Intf.hasInterference(): Is there interference in this block?
+  //
+  // - Intf.first() / Intf.last(): The range of interference.
+ //
+ // The live range should be split such that MainIntv is live-in when RegIn
+ // is set, and live-out when RegOut is set. MainIntv should never overlap
+ // the interference, and the stack interval should never have more than one
+ // use per block.
+
+  // No splits can be inserted after LastSplitPoint; overlap instead.
+ SlotIndex LastSplitPoint = Stop;
+ if (BI.LiveOut)
+ LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
+
+ // At this point, we know that either RegIn or RegOut is set. We dealt with
+ // the all-stack case above.
+
+ // Blocks without interference are relatively easy.
if (!Intf.hasInterference()) {
- // Block is interference-free.
- DEBUG(dbgs() << ", no interference");
- if (!BI.Uses) {
- assert(BI.LiveThrough && "No uses, but not live through block?");
- // Block is live-through without interference.
- DEBUG(dbgs() << ", no uses"
- << (RegIn ? ", live-through.\n" : ", stack in.\n"));
- if (!RegIn)
- SE->enterIntvAtEnd(*BI.MBB);
- continue;
- }
- if (!BI.LiveThrough) {
- DEBUG(dbgs() << ", not live-through.\n");
- SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
- continue;
- }
- if (!RegIn) {
- // Block is live-through, but entry bundle is on the stack.
- // Reload just before the first use.
- DEBUG(dbgs() << ", not live-in, enter before first use.\n");
- SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
- continue;
+ DEBUG(dbgs() << ", no interference.\n");
+ SE->selectIntv(MainIntv);
+ // The easiest case has MainIntv live through.
+ //
+ // |---o---o---| Live-in, live-out.
+ // ============= Use MainIntv everywhere.
+ //
+ SlotIndex From = Start, To = Stop;
+
+ // Block entry. Reload before the first use if MainIntv is not live-in.
+ //
+ // |---o-- Enter on stack.
+ // ____=== Reload before first use.
+ //
+ // | o-- Defined in block.
+ // === Use MainIntv from def.
+ //
+ if (!RegIn)
+ From = SE->enterIntvBefore(BI.FirstUse);
+
+ // Block exit. Handle cases where MainIntv is not live-out.
+ if (!BI.LiveOut)
+ //
+ // --x | Killed in block.
+ // === Use MainIntv up to kill.
+ //
+ To = SE->leaveIntvAfter(BI.LastUse);
+ else if (!RegOut) {
+ //
+ // --o---| Live-out on stack.
+ // ===____ Use MainIntv up to last use, switch to stack.
+ //
+ // -----o| Live-out on stack, last use after last split point.
+ // ====== Extend MainIntv to last use, overlapping.
+ // \____ Copy to stack interval before last split point.
+ //
+ if (BI.LastUse < LastSplitPoint)
+ To = SE->leaveIntvAfter(BI.LastUse);
+ else {
+        // The last use is after the last split point; it is probably an
+ // indirect branch.
+ To = SE->leaveIntvBefore(LastSplitPoint);
+ // Run a double interval from the split to the last use. This makes
+ // it possible to spill the complement without affecting the indirect
+ // branch.
+ SE->overlapIntv(To, BI.LastUse);
+ }
}
- DEBUG(dbgs() << ", live-through.\n");
+
+ // Paint in MainIntv liveness for this block.
+ SE->useIntv(From, To);
continue;
}
- // Block has interference.
- DEBUG(dbgs() << ", interference to " << Intf.last());
+ // We are now looking at a block with interference, and we know that either
+ // RegIn or RegOut is set.
+ assert(Intf.hasInterference() && (RegIn || RegOut) && "Bad invariant");
+
+ // If the live range is not live through the block, it is possible that the
+ // interference doesn't even overlap. Deal with those cases first. Since
+ // no copy instructions are required, we can tolerate interference starting
+ // or ending at the same instruction that kills or defines our live range.
- if (!BI.LiveThrough && Intf.last() <= BI.Def) {
- // The interference doesn't reach the outgoing segment.
- DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
- SE->useIntv(BI.Def, Stop);
+ // Live-in, killed before interference.
+ //
+ // ~~~ Interference after kill.
+ // |---o---x | Killed in block.
+ // ========= Use MainIntv everywhere.
+ //
+ if (RegIn && !BI.LiveOut && BI.LastUse <= Intf.first()) {
+ DEBUG(dbgs() << ", live-in, killed before interference.\n");
+ SE->selectIntv(MainIntv);
+ SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
+ SE->useIntv(Start, To);
continue;
}
-
- if (!BI.Uses) {
- // No uses in block, avoid interference by reloading as late as possible.
- DEBUG(dbgs() << ", no uses.\n");
- SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
- assert(SegStart >= Intf.last() && "Couldn't avoid interference");
+ // Live-out, defined after interference.
+ //
+ // ~~~ Interference before def.
+ // | o---o---| Defined in block.
+ // ========= Use MainIntv everywhere.
+ //
+ if (RegOut && !BI.LiveIn && BI.FirstUse >= Intf.last()) {
+ DEBUG(dbgs() << ", live-out, defined after interference.\n");
+ SE->selectIntv(MainIntv);
+ SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
+ SE->useIntv(From, Stop);
continue;
}
- SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
- if (Intf.last().getBoundaryIndex() < BI.LastUse) {
- // There are interference-free uses at the end of the block.
- // Find the first use that can get the live-out register.
- SmallVectorImpl<SlotIndex>::const_iterator UI =
- std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
- Intf.last().getBoundaryIndex());
- assert(UI != SA->UseSlots.end() && "Couldn't find last use");
- SlotIndex Use = *UI;
- assert(Use <= BI.LastUse && "Couldn't find last use");
- // Only attempt a split befroe the last split point.
- if (Use.getBaseIndex() <= LastSplitPoint) {
- DEBUG(dbgs() << ", free use at " << Use << ".\n");
- SlotIndex SegStart = SE->enterIntvBefore(Use);
- assert(SegStart >= Intf.last() && "Couldn't avoid interference");
- assert(SegStart < LastSplitPoint && "Impossible split point");
- SE->useIntv(SegStart, Stop);
- continue;
+ // The interference is now known to overlap the live range, but it may
+ // still be easy to avoid if all the interference is on one side of the
+ // uses, and we enter or leave on the stack.
+
+ // Live-out on stack, interference after last use.
+ //
+ // ~~~ Interference after last use.
+ // |---o---o---| Live-out on stack.
+ // =========____ Leave MainIntv after last use.
+ //
+ // ~ Interference after last use.
+ // |---o---o--o| Live-out on stack, late last use.
+ // =========____ Copy to stack after LSP, overlap MainIntv.
+ //
+ if (!RegOut && Intf.first() > BI.LastUse.getBoundaryIndex()) {
+ assert(RegIn && "Stack-in, stack-out should already be handled");
+ if (BI.LastUse < LastSplitPoint) {
+ DEBUG(dbgs() << ", live-in, stack-out, interference after last use.\n");
+ SE->selectIntv(MainIntv);
+ SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
+ assert(To <= Intf.first() && "Expected to avoid interference");
+ SE->useIntv(Start, To);
+ } else {
+ DEBUG(dbgs() << ", live-in, stack-out, avoid last split point\n");
+ SE->selectIntv(MainIntv);
+ SlotIndex To = SE->leaveIntvBefore(LastSplitPoint);
+ assert(To <= Intf.first() && "Expected to avoid interference");
+ SE->overlapIntv(To, BI.LastUse);
+ SE->useIntv(Start, To);
}
+ continue;
}
- // Interference is after the last use.
- DEBUG(dbgs() << " after last use.\n");
- SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
- assert(SegStart >= Intf.last() && "Couldn't avoid interference");
- }
-
- // Now all defs leading to live bundles are handled, do everything else.
- for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
- SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
- bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
- bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
-
- // Is the register live-in?
- if (!BI.LiveIn || !RegIn)
+ // Live-in on stack, interference before first use.
+ //
+ // ~~~ Interference before first use.
+ // |---o---o---| Live-in on stack.
+ // ____========= Enter MainIntv before first use.
+ //
+ if (!RegIn && Intf.last() < BI.FirstUse.getBaseIndex()) {
+ assert(RegOut && "Stack-in, stack-out should already be handled");
+ DEBUG(dbgs() << ", stack-in, interference before first use.\n");
+ SE->selectIntv(MainIntv);
+ SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
+ assert(From >= Intf.last() && "Expected to avoid interference");
+ SE->useIntv(From, Stop);
continue;
+ }
- // We have an incoming register. Check for interference.
- SlotIndex Start, Stop;
- tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
- Intf.moveToBlock(BI.MBB->getNumber());
- DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
- << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
- << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
- << ')');
-
- // Check interference entering the block.
- if (!Intf.hasInterference()) {
- // Block is interference-free.
- DEBUG(dbgs() << ", no interference");
- if (!BI.Uses) {
- assert(BI.LiveThrough && "No uses, but not live through block?");
- // Block is live-through without interference.
- if (RegOut) {
- DEBUG(dbgs() << ", no uses, live-through.\n");
- SE->useIntv(Start, Stop);
- } else {
- DEBUG(dbgs() << ", no uses, stack-out.\n");
- SE->leaveIntvAtTop(*BI.MBB);
- }
- continue;
- }
- if (!BI.LiveThrough) {
- DEBUG(dbgs() << ", killed in block.\n");
- SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
- continue;
+ // The interference is overlapping somewhere we wanted to use MainIntv. That
+ // means we need to create a local interval that can be allocated a
+ // different register.
+ DEBUG(dbgs() << ", creating local interval.\n");
+ unsigned LocalIntv = SE->openIntv();
+
+ // We may be creating copies directly between MainIntv and LocalIntv,
+ // bypassing the stack interval. When we do that, we should never use the
+ // leaveIntv* methods as they define values in the stack interval. By
+ // starting from the end of the block and working our way backwards, we can
+ // get by with only enterIntv* methods.
+ //
+ // When selecting split points, we generally try to maximize the stack
+  // interval as long as it contains no uses, maximize the main interval as
+ // long as it doesn't overlap interference, and minimize the local interval
+ // that we don't know how to allocate yet.
+
+ // Handle the block exit, set Pos to the first handled slot.
+ SlotIndex Pos = BI.LastUse;
+ if (RegOut) {
+ assert(Intf.last() < LastSplitPoint && "Cannot be live-out in register");
+ // Create a snippet of MainIntv that is live-out.
+ //
+ // ~~~ Interference overlapping uses.
+ // --o---| Live-out in MainIntv.
+ // ----=== Switch from LocalIntv to MainIntv after interference.
+ //
+ SE->selectIntv(MainIntv);
+ Pos = SE->enterIntvAfter(Intf.last());
+ assert(Pos >= Intf.last() && "Expected to avoid interference");
+ SE->useIntv(Pos, Stop);
+ SE->selectIntv(LocalIntv);
+ } else if (BI.LiveOut) {
+ if (BI.LastUse < LastSplitPoint) {
+ // Live-out on the stack.
+ //
+ // ~~~ Interference overlapping uses.
+ // --o---| Live-out on stack.
+ // ---____ Switch from LocalIntv to stack after last use.
+ //
+ Pos = SE->leaveIntvAfter(BI.LastUse);
+ } else {
+ // Live-out on the stack, last use after last split point.
+ //
+ // ~~~ Interference overlapping uses.
+ // --o--o| Live-out on stack, late use.
+ // ------ Copy to stack before LSP, overlap LocalIntv.
+ // \__
+ //
+ Pos = SE->leaveIntvBefore(LastSplitPoint);
+ // We need to overlap LocalIntv so it can reach LastUse.
+ SE->overlapIntv(Pos, BI.LastUse);
}
- if (!RegOut) {
- SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
- // Block is live-through, but exit bundle is on the stack.
- // Spill immediately after the last use.
- if (BI.LastUse < LastSplitPoint) {
- DEBUG(dbgs() << ", uses, stack-out.\n");
- SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
- continue;
- }
- // The last use is after the last split point, it is probably an
- // indirect jump.
- DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
- << LastSplitPoint << ", stack-out.\n");
- SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
- SE->useIntv(Start, SegEnd);
- // Run a double interval from the split to the last use.
- // This makes it possible to spill the complement without affecting the
- // indirect branch.
- SE->overlapIntv(SegEnd, BI.LastUse);
+ }
+
+ // When not live-out, leave Pos at LastUse. We have handled everything from
+ // Pos to Stop. Find the starting point for LocalIntv.
+ assert(SE->currentIntv() == LocalIntv && "Expecting local interval");
+
+ if (RegIn) {
+ assert(Start < Intf.first() && "Cannot be live-in with interference");
+ // Live-in in MainIntv, only use LocalIntv for interference.
+ //
+ // ~~~ Interference overlapping uses.
+ // |---o-- Live-in in MainIntv.
+ // ====--- Switch to LocalIntv before interference.
+ //
+ SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, Intf.first()));
+ assert(Switch <= Intf.first() && "Expected to avoid interference");
+ SE->useIntv(Switch, Pos);
+ SE->selectIntv(MainIntv);
+ SE->useIntv(Start, Switch);
+ } else {
+ // Live-in on stack, enter LocalIntv before first use.
+ //
+ // ~~~ Interference overlapping uses.
+      //  |---o--    Live-in on stack.
+ // ____--- Reload to LocalIntv before interference.
+ //
+ // Defined in block.
+ //
+ // ~~~ Interference overlapping uses.
+ // | o-- Defined in block.
+ // --- Begin LocalIntv at first use.
+ //
+ SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, BI.FirstUse));
+ SE->useIntv(Switch, Pos);
+ }
+ }
+
+ // Handle live-through blocks.
+ SE->selectIntv(MainIntv);
+ for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
+ unsigned Number = Cand.ActiveBlocks[i];
+ bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
+ bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
+ DEBUG(dbgs() << "Live through BB#" << Number << '\n');
+ if (RegIn && RegOut) {
+ Intf.moveToBlock(Number);
+ if (!Intf.hasInterference()) {
+ SE->useIntv(Indexes->getMBBStartIdx(Number),
+ Indexes->getMBBEndIdx(Number));
continue;
}
- // Register is live-through.
- DEBUG(dbgs() << ", uses, live-through.\n");
- SE->useIntv(Start, Stop);
- continue;
}
+ MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
+ if (RegIn)
+ SE->leaveIntvAtTop(*MBB);
+ if (RegOut)
+ SE->enterIntvAtEnd(*MBB);
+ }
+
+ ++NumGlobalSplits;
+
+ SmallVector<unsigned, 8> IntvMap;
+ SE->finish(&IntvMap);
+ DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
+
+ ExtraRegInfo.resize(MRI->getNumVirtRegs());
+ unsigned OrigBlocks = SA->getNumLiveBlocks();
- // Block has interference.
- DEBUG(dbgs() << ", interference from " << Intf.first());
+ // Sort out the new intervals created by splitting. We get four kinds:
+ // - Remainder intervals should not be split again.
+ // - Candidate intervals can be assigned to Cand.PhysReg.
+ // - Block-local splits are candidates for local splitting.
+ // - DCE leftovers should go back on the queue.
+ for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
+ LiveInterval &Reg = *LREdit.get(i);
- if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
- // The interference doesn't reach the outgoing segment.
- DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
- SE->useIntv(Start, BI.Kill);
+ // Ignore old intervals from DCE.
+ if (getStage(Reg) != RS_New)
continue;
- }
- if (!BI.Uses) {
- // No uses in block, avoid interference by spilling as soon as possible.
- DEBUG(dbgs() << ", no uses.\n");
- SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
- assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
+ // Remainder interval. Don't try splitting again, spill if it doesn't
+ // allocate.
+ if (IntvMap[i] == 0) {
+ setStage(Reg, RS_Global);
continue;
}
- if (Intf.first().getBaseIndex() > BI.FirstUse) {
- // There are interference-free uses at the beginning of the block.
- // Find the last use that can get the register.
- SmallVectorImpl<SlotIndex>::const_iterator UI =
- std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
- Intf.first().getBaseIndex());
- assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
- SlotIndex Use = (--UI)->getBoundaryIndex();
- DEBUG(dbgs() << ", free use at " << *UI << ".\n");
- SlotIndex SegEnd = SE->leaveIntvAfter(Use);
- assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
- SE->useIntv(Start, SegEnd);
+
+ // Main interval. Allow repeated splitting as long as the number of live
+ // blocks is strictly decreasing.
+ if (IntvMap[i] == MainIntv) {
+ if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
+ DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
+ << " blocks as original.\n");
+        // Don't allow repeated splitting as a safeguard against looping.
+ setStage(Reg, RS_Global);
+ }
continue;
}
- // Interference is before the first use.
- DEBUG(dbgs() << " before first use.\n");
- SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
- assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
+ // Other intervals are treated as new. This includes local intervals created
+ // for blocks with multiple uses, and anything created by DCE.
}
- SE->closeIntv();
-
- // FIXME: Should we be more aggressive about splitting the stack region into
- // per-block segments? The current approach allows the stack region to
- // separate into connected components. Some components may be allocatable.
- SE->finish();
- ++NumGlobalSplits;
-
if (VerifyEnabled)
MF->verify(this, "After splitting live range around region");
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
- BitVector LiveBundles, BestBundles;
- float BestCost = 0;
- unsigned BestReg = 0;
+ float BestCost = Hysteresis * calcSpillCost();
+ DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
+ const unsigned NoCand = ~0u;
+ unsigned BestCand = NoCand;
Order.rewind();
for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
if (GlobalCand.size() <= Cand)
GlobalCand.resize(Cand+1);
- GlobalCand[Cand].PhysReg = PhysReg;
+ GlobalCand[Cand].reset(PhysReg);
- float Cost = calcSplitConstraints(PhysReg);
+ SpillPlacer->prepare(GlobalCand[Cand].LiveBundles);
+ float Cost;
+ InterferenceCache::Cursor Intf(IntfCache, PhysReg);
+ if (!addSplitConstraints(Intf, Cost)) {
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
+ continue;
+ }
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
- if (BestReg && Cost >= BestCost) {
- DEBUG(dbgs() << " higher.\n");
+ if (Cost >= BestCost) {
+ DEBUG({
+ if (BestCand == NoCand)
+ dbgs() << " worse than no bundles\n";
+ else
+ dbgs() << " worse than "
+ << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
+ });
continue;
}
+ growRegion(GlobalCand[Cand], Intf);
+
+ SpillPlacer->finish();
- SpillPlacer->placeSpills(SplitConstraints, LiveBundles);
// No live bundles, defer to splitSingleBlocks().
- if (!LiveBundles.any()) {
+ if (!GlobalCand[Cand].LiveBundles.any()) {
DEBUG(dbgs() << " no bundles.\n");
continue;
}
- Cost += calcGlobalSplitCost(LiveBundles);
+ Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf);
DEBUG({
dbgs() << ", total = " << Cost << " with bundles";
- for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
+ for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0;
+ i = GlobalCand[Cand].LiveBundles.find_next(i))
dbgs() << " EB#" << i;
dbgs() << ".\n";
});
- if (!BestReg || Cost < BestCost) {
- BestReg = PhysReg;
- BestCost = 0.98f * Cost; // Prevent rounding effects.
- BestBundles.swap(LiveBundles);
+ if (Cost < BestCost) {
+ BestCand = Cand;
+ BestCost = Hysteresis * Cost; // Prevent rounding effects.
}
}
- if (!BestReg)
+ if (BestCand == NoCand)
return 0;
- splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
- setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
+ splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
return 0;
}
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
SmallVectorImpl<float> &GapWeight) {
- assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
- const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
+ assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
+ const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
const unsigned NumGaps = Uses.size()-1;
}
}
-/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
-/// before MI that has a slot index. If MI is the first mapped instruction in
-/// its block, return the block start index instead.
-///
-SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
- assert(MI && "Missing MachineInstr");
- const MachineBasicBlock *MBB = MI->getParent();
- MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
- while (I != B)
- if (!(--I)->isDebugValue() && !I->isCopy())
- return Indexes->getInstructionIndex(I);
- return Indexes->getMBBStartIdx(MBB);
-}
-
-/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
-/// real non-copy instruction for each instruction in SA->UseSlots.
-///
-void RAGreedy::calcPrevSlots() {
- const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
- PrevSlot.clear();
- PrevSlot.reserve(Uses.size());
- for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
- const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
- PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
- }
-}
-
-/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
-/// be beneficial to split before UseSlots[i].
-///
-/// 0 is always a valid split point
-unsigned RAGreedy::nextSplitPoint(unsigned i) {
- const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
- const unsigned Size = Uses.size();
- assert(i != Size && "No split points after the end");
- // Allow split before i when Uses[i] is not adjacent to the previous use.
- while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
- ;
- return i;
-}
-
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
- assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
- const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
+ assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
+ const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
// Note that it is possible to have an interval that is live-in or live-out
// while only covering a single block - A phi-def can use undef values from
dbgs() << '\n';
});
- // For every use, find the previous mapped non-copy instruction.
- // We use this to detect valid split points, and to estimate new interval
- // sizes.
- calcPrevSlots();
+ // Since we allow local split results to be split again, there is a risk of
+ // creating infinite loops. It is tempting to require that the new live
+ // ranges have less instructions than the original. That would guarantee
+ // convergence, but it is too strict. A live range with 3 instructions can be
+ // split 2+3 (including the COPY), and we want to allow that.
+ //
+ // Instead we use these rules:
+ //
+  // 1. Allow any split for ranges with getStage() < RS_Local (except for the
+  //    noop split, of course).
+ // 2. Require progress be made for ranges with getStage() >= RS_Local. All
+ // the new ranges must have fewer instructions than before the split.
+ // 3. New ranges with the same number of instructions are marked RS_Local,
+ // smaller ranges are marked RS_New.
+ //
+ // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
+ // excessive splitting and infinite loops.
+ //
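+  // Concretely: the first 3 -> 2+3 split leaves the 3-instruction piece
+  // tagged RS_Local, since it is no smaller than the original, so a later
+  // split of that piece must strictly reduce the number of gaps or it is
+  // rejected.
+  //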
+ bool ProgressRequired = getStage(VirtReg) >= RS_Local;
+ // Best split candidate.
unsigned BestBefore = NumGaps;
unsigned BestAfter = 0;
float BestDiff = 0;
// The new spill weight must be larger than any gap interference.
// We will split before Uses[SplitBefore] and after Uses[SplitAfter].
- unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
+ unsigned SplitBefore = 0, SplitAfter = 1;
// MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
// It is the spill weight that needs to be evicted.
float MaxGap = GapWeight[0];
- for (unsigned i = 1; i != SplitAfter; ++i)
- MaxGap = std::max(MaxGap, GapWeight[i]);
for (;;) {
// Live before/after split?
}
// Should the interval be extended or shrunk?
bool Shrink = true;
- if (MaxGap < HUGE_VALF) {
- // Estimate the new spill weight.
- //
- // Each instruction reads and writes the register, except the first
- // instr doesn't read when !FirstLive, and the last instr doesn't write
- // when !LastLive.
- //
- // We will be inserting copies before and after, so the total number of
- // reads and writes is 2 * EstUses.
- //
- const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
- 2*(LiveBefore + LiveAfter);
- // Try to guess the size of the new interval. This should be trivial,
- // but the slot index of an inserted copy can be a lot smaller than the
- // instruction it is inserted before if there are many dead indexes
- // between them.
- //
- // We measure the distance from the instruction before SplitBefore to
- // get a conservative estimate.
- //
- // The final distance can still be different if inserting copies
- // triggers a slot index renumbering.
+ // How many gaps would the new range have?
+ unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
+
+ // Legally, without causing looping?
+ bool Legal = !ProgressRequired || NewGaps < NumGaps;
+
+ if (Legal && MaxGap < HUGE_VALF) {
+ // Estimate the new spill weight. Each instruction reads or writes the
+ // register. Conservatively assume there are no read-modify-write
+ // instructions.
//
- const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
- PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
+ // Try to guess the size of the new interval.
+ const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
+ Uses[SplitBefore].distance(Uses[SplitAfter]) +
+ (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
// Would this split be possible to allocate?
// Never allocate all gaps, we wouldn't be making progress.
- float Diff = EstWeight - MaxGap;
- DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
- if (Diff > 0) {
+ DEBUG(dbgs() << " w=" << EstWeight);
+ if (EstWeight * Hysteresis >= MaxGap) {
Shrink = false;
+ float Diff = EstWeight - MaxGap;
if (Diff > BestDiff) {
DEBUG(dbgs() << " (best)");
- BestDiff = Diff;
+ BestDiff = Hysteresis * Diff;
BestBefore = SplitBefore;
BestAfter = SplitAfter;
}
// Try to shrink.
if (Shrink) {
- SplitBefore = nextSplitPoint(SplitBefore);
- if (SplitBefore < SplitAfter) {
+ if (++SplitBefore < SplitAfter) {
DEBUG(dbgs() << " shrink\n");
// Recompute the max when necessary.
if (GapWeight[SplitBefore - 1] >= MaxGap) {
}
DEBUG(dbgs() << " extend\n");
- for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
- SplitAfter != e; ++SplitAfter)
- MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
- continue;
+ MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
}
}
SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
SE->useIntv(SegStart, SegStop);
- SE->closeIntv();
- SE->finish();
- setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
+ SmallVector<unsigned, 8> IntvMap;
+ SE->finish(&IntvMap);
+ DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
+
+ // If the new range has the same number of instructions as before, mark it as
+ // RS_Local so the next split will be forced to make progress. Otherwise,
+ // leave the new intervals as RS_New so they can compete.
+ bool LiveBefore = BestBefore != 0 || BI.LiveIn;
+ bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
+ unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
+ if (NewGaps >= NumGaps) {
+ DEBUG(dbgs() << "Tagging non-progress ranges: ");
+ assert(!ProgressRequired && "Didn't make progress when it was required.");
+ for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
+ if (IntvMap[i] == 1) {
+ setStage(*LREdit.get(i), RS_Local);
+ DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
+ }
+ DEBUG(dbgs() << '\n');
+ }
++NumLocalSplits;
return 0;
// Don't iterate global splitting.
// Move straight to spilling if this range was produced by a global split.
- LiveRangeStage Stage = getStage(VirtReg);
- if (Stage >= RS_Block)
+ if (getStage(VirtReg) >= RS_Global)
return 0;
SA->analyze(&VirtReg);
- // First try to split around a region spanning multiple blocks.
- if (Stage < RS_Region) {
- unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
- if (PhysReg || !NewVRegs.empty())
+ // FIXME: SplitAnalysis may repair broken live ranges coming from the
+  // coalescer. That may cause the range to become allocatable, which means that
+ // tryRegionSplit won't be making progress. This check should be replaced with
+ // an assertion when the coalescer is fixed.
+ if (SA->didRepairRange()) {
+ // VirtReg has changed, so all cached queries are invalid.
+ invalidateVirtRegs();
+ if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
return PhysReg;
}
+ // First try to split around a region spanning multiple blocks.
+ unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
+ if (PhysReg || !NewVRegs.empty())
+ return PhysReg;
+
// Then isolate blocks with multiple uses.
- if (Stage < RS_Block) {
- SplitAnalysis::BlockPtrSet Blocks;
- if (SA->getMultiUseBlocks(Blocks)) {
- LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
- SE->reset(LREdit);
- SE->splitSingleBlocks(Blocks);
- setStage(NewVRegs.begin(), NewVRegs.end(), RS_Block);
- if (VerifyEnabled)
- MF->verify(this, "After splitting live range around basic blocks");
- }
+ SplitAnalysis::BlockPtrSet Blocks;
+ if (SA->getMultiUseBlocks(Blocks)) {
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
+ SE->reset(LREdit);
+ SE->splitSingleBlocks(Blocks);
+ setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
+ if (VerifyEnabled)
+ MF->verify(this, "After splitting live range around basic blocks");
}
// Don't assign any physregs.
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
// First try assigning a free register.
- AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
- while (unsigned PhysReg = Order.next()) {
- if (!checkPhysRegInterference(VirtReg, PhysReg))
- return PhysReg;
- }
-
- if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
+ AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
+ if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
return PhysReg;
+ LiveRangeStage Stage = getStage(VirtReg);
+ DEBUG(dbgs() << StageName[Stage]
+ << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
+
+ // Try to evict a less worthy live range, but only for ranges from the primary
+ // queue. The RS_Second ranges already failed to do this, and they should not
+ // get a second chance until they have been split.
+ if (Stage != RS_Second)
+ if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
+ return PhysReg;
+
assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
// The first time we see a live range, don't try to split or spill.
// Wait until the second time, when all smaller ranges have been allocated.
// This gives a better picture of the interference to split around.
- LiveRangeStage Stage = getStage(VirtReg);
if (Stage == RS_First) {
- LRStage[VirtReg.reg] = RS_Second;
+ setStage(VirtReg, RS_Second);
DEBUG(dbgs() << "wait for second round\n");
NewVRegs.push_back(&VirtReg);
return 0;
}
- assert(Stage < RS_Spill && "Cannot allocate after spilling");
+ // If we couldn't allocate a register from spilling, there is probably some
+  // invalid inline assembly. The base class will report it.
+ if (Stage >= RS_Spill)
+ return ~0u;
// Try splitting VirtReg or interferences.
unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
Indexes = &getAnalysis<SlotIndexes>();
DomTree = &getAnalysis<MachineDominatorTree>();
- ReservedRegs = TRI->getReservedRegs(*MF);
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
Loops = &getAnalysis<MachineLoopInfo>();
LoopRanges = &getAnalysis<MachineLoopRanges>();
Bundles = &getAnalysis<EdgeBundles>();
SpillPlacer = &getAnalysis<SpillPlacement>();
+ DebugVars = &getAnalysis<LiveDebugVariables>();
SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
- LRStage.clear();
- LRStage.resize(MRI->getNumVirtRegs());
+ ExtraRegInfo.clear();
+ ExtraRegInfo.resize(MRI->getNumVirtRegs());
+ NextCascade = 1;
IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
allocatePhysRegs();
VRM->rewrite(Indexes);
}
+ // Write out new DBG_VALUE instructions.
+ DebugVars->emitDebugValues(VRM);
+
// The pass output is in VirtRegMap. Release all the transient data.
releaseMemory();