X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FRegAllocGreedy.cpp;h=feec3d4f7c317f65ffe38dba0b9c7d58312d445c;hb=aba6559370c3d453588103fb667ffa3b11b76652;hp=e86d45b29fea145340313628a33dc1892289c141;hpb=d2056e51c662765f98413fa071afbff53d87b384;p=oota-llvm.git

diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index e86d45b29fe..feec3d4f7c3 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -33,11 +33,9 @@
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineLoopRanges.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -53,9 +51,14 @@
 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
 STATISTIC(NumLocalSplits,  "Number of split local live ranges");
 STATISTIC(NumEvicted,      "Number of interferences evicted");
 
-static cl::opt<bool>
-ComplexEviction("complex-eviction", cl::Hidden,
-                cl::desc("Use complex eviction heuristics"));
+static cl::opt<SplitEditor::ComplementSpillMode>
+SplitSpillMode("split-spill-mode", cl::Hidden,
+  cl::desc("Spill mode for splitting live ranges"),
+  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
+             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
+             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
+             clEnumValEnd),
+  cl::init(SplitEditor::SM_Partition));
 
 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                        createGreedyRegisterAllocator);
@@ -67,14 +70,12 @@ class RAGreedy : public MachineFunctionPass,
 
   // context
   MachineFunction *MF;
-  BitVector ReservedRegs;
 
   // analyses
   SlotIndexes *Indexes;
   LiveStacks *LS;
   MachineDominatorTree *DomTree;
   MachineLoopInfo *Loops;
-  MachineLoopRanges *LoopRanges;
   EdgeBundles *Bundles;
   SpillPlacement *SpillPlacer;
   LiveDebugVariables *DebugVars;
@@ -82,6 +83,7 @@ class RAGreedy : public MachineFunctionPass,
   // state
   std::auto_ptr<Spiller> SpillerInstance;
   std::priority_queue<std::pair<unsigned, unsigned> > Queue;
+  unsigned NextCascade;
 
   // Live ranges pass through a number of stages as we try to allocate them.
   // Some of the stages may also create new live ranges:
@@ -97,33 +99,88 @@ class RAGreedy : public MachineFunctionPass,
   // range splitting algorithm terminates, something that is otherwise hard to
   // ensure.
   enum LiveRangeStage {
-    RS_New,      ///< Never seen before.
-    RS_First,    ///< First time in the queue.
-    RS_Evicted,  ///< Requeued after being evicted.
-    RS_Second,   ///< Second time in the queue, ready for splitting.
-    RS_Global,   ///< Produced by global splitting.
-    RS_Local,    ///< Produced by local splitting.
-    RS_Spill     ///< Produced by spilling.
+    /// Newly created live range that has never been queued.
+    RS_New,
+
+    /// Only attempt assignment and eviction. Then requeue as RS_Split.
+    RS_Assign,
+
+    /// Attempt live range splitting if assignment is impossible.
+    RS_Split,
+
+    /// Attempt more aggressive live range splitting that is guaranteed to make
+    /// progress. This is used for split products that may not be making
+    /// progress.
+    RS_Split2,
+
+    /// Live range will be spilled. No more splitting will be attempted.
+    RS_Spill,
+
+    /// There is nothing more we can do to this live range. Abort compilation
+    /// if it can't be assigned.
+ RS_Done }; static const char *const StageName[]; - IndexedMap LRStage; + // RegInfo - Keep additional information about each live range. + struct RegInfo { + LiveRangeStage Stage; + + // Cascade - Eviction loop prevention. See canEvictInterference(). + unsigned Cascade; + + RegInfo() : Stage(RS_New), Cascade(0) {} + }; + + IndexedMap ExtraRegInfo; LiveRangeStage getStage(const LiveInterval &VirtReg) const { - return LiveRangeStage(LRStage[VirtReg.reg]); + return ExtraRegInfo[VirtReg.reg].Stage; + } + + void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) { + ExtraRegInfo.resize(MRI->getNumVirtRegs()); + ExtraRegInfo[VirtReg.reg].Stage = Stage; } template void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) { - LRStage.resize(MRI->getNumVirtRegs()); + ExtraRegInfo.resize(MRI->getNumVirtRegs()); for (;Begin != End; ++Begin) { unsigned Reg = (*Begin)->reg; - if (LRStage[Reg] == RS_New) - LRStage[Reg] = NewStage; + if (ExtraRegInfo[Reg].Stage == RS_New) + ExtraRegInfo[Reg].Stage = NewStage; } } + /// Cost of evicting interference. + struct EvictionCost { + unsigned BrokenHints; ///< Total number of broken hints. + float MaxWeight; ///< Maximum spill weight evicted. + + EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {} + + bool operator<(const EvictionCost &O) const { + if (BrokenHints != O.BrokenHints) + return BrokenHints < O.BrokenHints; + return MaxWeight < O.MaxWeight; + } + }; + + // Register mask interference. The current VirtReg is checked for register + // mask interference on entry to selectOrSplit(). If there is no + // interference, UsableRegs is left empty. If there is interference, + // UsableRegs has a bit mask of registers that can be used without register + // mask interference. + BitVector UsableRegs; + + /// clobberedByRegMask - Returns true if PhysReg is not directly usable + /// because of register mask clobbers. + bool clobberedByRegMask(unsigned PhysReg) const { + return !UsableRegs.empty() && !UsableRegs.test(PhysReg); + } + // splitting state. std::auto_ptr SA; std::auto_ptr SE; @@ -136,15 +193,38 @@ class RAGreedy : public MachineFunctionPass, /// Global live range splitting candidate info. struct GlobalSplitCandidate { + // Register intended for assignment, or 0. unsigned PhysReg; + + // SplitKit interval index for this candidate. + unsigned IntvIdx; + + // Interference for PhysReg. + InterferenceCache::Cursor Intf; + + // Bundles where this candidate should be live. BitVector LiveBundles; SmallVector ActiveBlocks; - void reset(unsigned Reg) { + void reset(InterferenceCache &Cache, unsigned Reg) { PhysReg = Reg; + IntvIdx = 0; + Intf.setPhysReg(Cache, Reg); LiveBundles.clear(); ActiveBlocks.clear(); } + + // Set B[i] = C for every live bundle where B[i] was NoCand. + unsigned getBundles(SmallVectorImpl &B, unsigned C) { + unsigned Count = 0; + for (int i = LiveBundles.find_first(); i >= 0; + i = LiveBundles.find_next(i)) + if (B[i] == NoCand) { + B[i] = C; + Count++; + } + return Count; + } }; /// Candidate info for for each PhysReg in AllocationOrder. @@ -152,9 +232,11 @@ class RAGreedy : public MachineFunctionPass, /// class. SmallVector GlobalCand; - /// For every instruction in SA->UseSlots, store the previous non-copy - /// instruction. - SmallVector PrevSlot; + enum { NoCand = ~0u }; + + /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to + /// NoCand which indicates the stack interval. 
+ SmallVector BundleCand; public: RAGreedy(); @@ -179,7 +261,6 @@ public: static char ID; private: - void LRE_WillEraseInstruction(MachineInstr*); bool LRE_CanEraseVirtReg(unsigned); void LRE_WillShrinkVirtReg(unsigned); void LRE_DidCloneVirtReg(unsigned, unsigned); @@ -187,18 +268,15 @@ private: float calcSpillCost(); bool addSplitConstraints(InterferenceCache::Cursor, float&); void addThroughConstraints(InterferenceCache::Cursor, ArrayRef); - void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor); - float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor); - void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&, - SmallVectorImpl&); + void growRegion(GlobalSplitCandidate &Cand); + float calcGlobalSplitCost(GlobalSplitCandidate&); + bool calcCompactRegion(GlobalSplitCandidate&); + void splitAroundRegion(LiveRangeEdit&, ArrayRef); void calcGapWeights(unsigned, SmallVectorImpl&); - SlotIndex getPrevMappedIndex(const MachineInstr*); - void calcPrevSlots(); - unsigned nextSplitPoint(unsigned); - bool hasDefInRange(const LiveInterval&, const LiveInterval&); - bool hasUseInRange(const LiveInterval&, const LiveInterval&); - bool canEvict(LiveInterval &A, LiveInterval &B); - bool canEvictInterference(LiveInterval&, unsigned, float&, bool); + bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool); + bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&); + void evictInterference(LiveInterval&, unsigned, + SmallVectorImpl&); unsigned tryAssign(LiveInterval&, AllocationOrder&, SmallVectorImpl&); @@ -206,6 +284,8 @@ private: SmallVectorImpl&, unsigned = ~0u); unsigned tryRegionSplit(LiveInterval&, AllocationOrder&, SmallVectorImpl&); + unsigned tryBlockSplit(LiveInterval&, AllocationOrder&, + SmallVectorImpl&); unsigned tryLocalSplit(LiveInterval&, AllocationOrder&, SmallVectorImpl&); unsigned trySplit(LiveInterval&, AllocationOrder&, @@ -217,13 +297,12 @@ char RAGreedy::ID = 0; #ifndef NDEBUG const char *const RAGreedy::StageName[] = { - "RS_New", - "RS_First", - "RS_Evicted", - "RS_Second", - "RS_Global", - "RS_Local", - "RS_Spill" + "RS_New", + "RS_Assign", + "RS_Split", + "RS_Split2", + "RS_Spill", + "RS_Done" }; #endif @@ -236,18 +315,17 @@ FunctionPass* llvm::createGreedyRegisterAllocator() { return new RAGreedy(); } -RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) { +RAGreedy::RAGreedy(): MachineFunctionPass(ID) { initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry()); initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); - initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry()); - initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry()); + initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry()); + initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry()); initializeLiveStacksPass(*PassRegistry::getPassRegistry()); initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry()); initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry()); - initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry()); initializeVirtRegMapPass(*PassRegistry::getPassRegistry()); initializeEdgeBundlesPass(*PassRegistry::getPassRegistry()); initializeSpillPlacementPass(*PassRegistry::getPassRegistry()); @@ -262,9 +340,6 @@ void 
RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - if (StrongPHIElim) - AU.addRequiredID(StrongPHIEliminationID); - AU.addRequiredTransitive(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); @@ -272,8 +347,6 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); - AU.addPreserved(); AU.addRequired(); AU.addPreserved(); AU.addRequired(); @@ -286,11 +359,6 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { // LiveRangeEdit delegate methods //===----------------------------------------------------------------------===// -void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) { - // LRE itself will remove from SlotIndexes and parent basic block. - VRM->RemoveMachineInstrFromMaps(MI); -} - bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) { if (unsigned PhysReg = VRM->getPhys(VirtReg)) { unassign(LIS->getInterval(VirtReg), PhysReg); @@ -313,16 +381,22 @@ void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) { } void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) { + // Cloning a register we haven't even heard about yet? Just ignore it. + if (!ExtraRegInfo.inBounds(Old)) + return; + // LRE may clone a virtual register because dead code elimination causes it to - // be split into connected components. Ensure that the new register gets the + // be split into connected components. The new components are much smaller + // than the original, so they should get a new chance at being assigned. // same stage as the parent. - LRStage.grow(New); - LRStage[New] = LRStage[Old]; + ExtraRegInfo[Old].Stage = RS_Assign; + ExtraRegInfo.grow(New); + ExtraRegInfo[New] = ExtraRegInfo[Old]; } void RAGreedy::releaseMemory() { SpillerInstance.reset(0); - LRStage.clear(); + ExtraRegInfo.clear(); GlobalCand.clear(); RegAllocBase::releaseMemory(); } @@ -336,18 +410,17 @@ void RAGreedy::enqueue(LiveInterval *LI) { "Can only enqueue virtual registers"); unsigned Prio; - LRStage.grow(Reg); - if (LRStage[Reg] == RS_New) - LRStage[Reg] = RS_First; + ExtraRegInfo.grow(Reg); + if (ExtraRegInfo[Reg].Stage == RS_New) + ExtraRegInfo[Reg].Stage = RS_Assign; - if (LRStage[Reg] == RS_Second) + if (ExtraRegInfo[Reg].Stage == RS_Split) { // Unsplit ranges that couldn't be allocated immediately are deferred until - // everything else has been allocated. Long ranges are allocated last so - // they are split against realistic interference. - Prio = (1u << 31) - Size; - else { - // Everything else is allocated in long->short order. Long ranges that don't - // fit should be spilled ASAP so they don't create interference. + // everything else has been allocated. + Prio = Size; + } else { + // Everything is allocated in long->short order. Long ranges that don't fit + // should be spilled (or split) ASAP so they don't create interference. Prio = (1u << 31) + Size; // Boost ranges that have a physical register hint. @@ -377,13 +450,30 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg, SmallVectorImpl &NewVRegs) { Order.rewind(); unsigned PhysReg; - while ((PhysReg = Order.next())) + while ((PhysReg = Order.next())) { + if (clobberedByRegMask(PhysReg)) + continue; if (!checkPhysRegInterference(VirtReg, PhysReg)) break; + } if (!PhysReg || Order.isHint(PhysReg)) return PhysReg; - // PhysReg is available. Try to evict interference from a cheaper alternative. + // PhysReg is available, but there may be a better choice. 
+ + // If we missed a simple hint, try to cheaply evict interference from the + // preferred register. + if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg)) + if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) { + DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n'); + EvictionCost MaxCost(1); + if (canEvictInterference(VirtReg, Hint, true, MaxCost)) { + evictInterference(VirtReg, Hint, NewVRegs); + return Hint; + } + } + + // Try to evict interference from a cheaper alternative. unsigned Cost = TRI->getCostPerUse(PhysReg); // Most registers have 0 additional cost. @@ -401,73 +491,58 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg, // Interference eviction //===----------------------------------------------------------------------===// -/// hasDefInRange - Returns true when any def of A happens where B is live. -/// -/// The SSA form of live intervals guarantees: +/// shouldEvict - determine if A should evict the assigned live range B. The +/// eviction policy defined by this function together with the allocation order +/// defined by enqueue() decides which registers ultimately end up being split +/// and spilled. /// -/// A.overlaps(B) == hasDefInRange(A, B) || hasDefInRange(B, A) +/// Cascade numbers are used to prevent infinite loops if this function is a +/// cyclic relation. /// -bool RAGreedy::hasDefInRange(const LiveInterval &A, const LiveInterval &B) { - for (LiveInterval::const_vni_iterator I = A.vni_begin(), E = A.vni_end(); - I != E; ++I) { - const VNInfo *VNI = *I; - if (VNI->isUnused()) - continue; - if (B.liveAt(VNI->def)) - return true; - } - return false; -} - -/// hasUseInRange - Returns true when any def or use of A happens where B is -/// live. The following is always true: -/// -/// A.overlaps(B) == hasUseInRange(A, B) || hasUseInRange(B, A) -/// -bool RAGreedy::hasUseInRange(const LiveInterval &A, const LiveInterval &B) { - if (hasDefInRange(A, B)) +/// @param A The live range to be assigned. +/// @param IsHint True when A is about to be assigned to its preferred +/// register. +/// @param B The live range to be evicted. +/// @param BreaksHint True when B is already assigned to its preferred register. +bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, + LiveInterval &B, bool BreaksHint) { + bool CanSplit = getStage(B) < RS_Spill; + + // Be fairly aggressive about following hints as long as the evictee can be + // split. + if (CanSplit && IsHint && !BreaksHint) return true; - for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(A.reg), - E = MRI->use_nodbg_end(); I != E; ++I) { - if (I.getOperand().isUndef()) - continue; - SlotIndex Idx = Indexes->getInstructionIndex(&*I).getDefIndex(); - if (B.liveAt(Idx)) - return true; - } - return false; -} -/// canEvict - determine if A can evict the assigned live range B. The eviction -/// policy defined by this function together with the allocation order defined -/// by enqueue() decides which registers ultimately end up being split and -/// spilled. -/// -/// Safeguards ensure that canEvict can never cause an infinite loop. -/// -bool RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) { - if (!ComplexEviction) - return A.weight > B.weight; - - // Evict B if it has no uses in A's live range. - if (!hasUseInRange(B, A)) { - DEBUG(dbgs() << "Bypass: " << B << '\n'); - return true; - } return A.weight > B.weight; } -/// canEvict - Return true if all interferences between VirtReg and PhysReg can -/// be evicted. -/// Return false if any interference is heavier than MaxWeight. 
-/// On return, set MaxWeight to the maximal spill weight of an interference.
+/// canEvictInterference - Return true if all interferences between VirtReg and
+/// PhysReg can be evicted.
+///
+/// @param VirtReg Live range that is about to be assigned.
+/// @param PhysReg Desired register for assignment.
+/// @param IsHint  True when PhysReg is VirtReg's preferred register.
+/// @param MaxCost Only look for cheaper candidates and update with new cost
+///                when returning true.
+/// @returns True when interference can be evicted cheaper than MaxCost.
 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
-                                    float &MaxWeight, bool OnlyCheap) {
-  float Weight = 0;
-  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+                                    bool IsHint, EvictionCost &MaxCost) {
+  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
+  // never involved in an eviction before. If a cascade number was assigned,
+  // deny evicting anything with the same or a newer cascade number. This
+  // prevents infinite eviction loops.
+  //
+  // This works out so a register without a cascade number is allowed to evict
+  // anything, and it can be evicted by anything.
+  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+  if (!Cascade)
+    Cascade = NextCascade;
+
+  EvictionCost Cost;
+  for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
     LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
     // If there is 10 or more interferences, chances are one is heavier.
-    if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
+    if (Q.collectInterferingVRegs(10) >= 10)
      return false;
 
     // Check if any interfering live range is heavier than MaxWeight.
@@ -475,75 +550,112 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
       LiveInterval *Intf = Q.interferingVRegs()[i - 1];
       if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
         return false;
-      if (getStage(*Intf) == RS_Spill)
+      // Never evict spill products. They cannot split or spill.
+      if (getStage(*Intf) == RS_Done)
         return false;
-      if (Intf->weight >= MaxWeight)
-        return false;
-      // When we are simply looking for a cheaper alternative, don't try too
-      // hard. The evicted range shouldn't end up getting split.
-      if (OnlyCheap) {
-        // Don't evict something that won't be able to reevict something else.
-        if (getStage(*Intf) != RS_First)
-          return false;
-        // Don't break a satisfied hint.
-        if (VRM->getRegAllocPref(Intf->reg) == *AliasI)
+      // Once a live range becomes small enough, it is urgent that we find a
+      // register for it. This is indicated by an infinite spill weight. These
+      // urgent live ranges get to evict almost anything.
+      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
+      // Only evict older cascades or live ranges without a cascade.
+      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
+      if (Cascade <= IntfCascade) {
+        if (!Urgent)
           return false;
+        // We permit breaking cascades for urgent evictions. It should be the
+        // last resort, though, so make it really expensive.
+        Cost.BrokenHints += 10;
       }
-      if (VirtReg.isSpillable() && !canEvict(VirtReg, *Intf))
+      // Would this break a satisfied hint?
+      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
+      // Update eviction cost.
+      Cost.BrokenHints += BreaksHint;
+      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
+      // Abort if this would be too expensive.
+      if (!(Cost < MaxCost))
+        return false;
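+      // Illustrative example (not from the LLVM sources): with MaxCost =
+      // {BrokenHints: 1, MaxWeight: 3.0}, an interference set breaking one
+      // hint with maximum weight 2.5 yields Cost = {1, 2.5} < MaxCost, so we
+      // keep scanning; breaking a second hint yields Cost = {2, ...}, which
+      // compares greater no matter how light the interference is, because
+      // operator< on EvictionCost tests BrokenHints before MaxWeight.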
+      // Finally, apply the eviction policy for non-urgent evictions.
+      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
-      Weight = std::max(Weight, Intf->weight);
     }
   }
-  MaxWeight = Weight;
+  MaxCost = Cost;
   return true;
 }
 
+/// evictInterference - Evict any interfering registers that prevent VirtReg
+/// from being assigned to PhysReg. This assumes that canEvictInterference
+/// returned true.
+void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
+                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
+  // Make sure that VirtReg has a cascade number, and assign that cascade
+  // number to every evicted register. These live ranges can then only be
+  // evicted by a newer cascade, preventing infinite loops.
+  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+  if (!Cascade)
+    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
+
+  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
+               << " interference: Cascade " << Cascade << '\n');
+  for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
+    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
+    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
+      LiveInterval *Intf = Q.interferingVRegs()[i];
+      unassign(*Intf, VRM->getPhys(Intf->reg));
+      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
+              VirtReg.isSpillable() < Intf->isSpillable()) &&
+             "Cannot decrease cascade number, illegal eviction");
+      ExtraRegInfo[Intf->reg].Cascade = Cascade;
+      ++NumEvicted;
+      NewVRegs.push_back(Intf);
+    }
+  }
+}
+
 /// tryEvict - Try to evict all interferences for a physreg.
-/// @param  VirtReg Currently unassigned virtual register.
-/// @param  Order   Physregs to try.
-/// @param  CostPerUseLimit Only look at physregs below this cost per use.
-/// @return         Physreg to assign VirtReg, or 0.
+/// @param  VirtReg Currently unassigned virtual register.
+/// @param  Order   Physregs to try.
+/// @return         Physreg to assign VirtReg, or 0.
 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs,
                             unsigned CostPerUseLimit) {
-  // Ranges that may have been evicted or requeued for splitting may never evict
-  // other ranges. That could cause looping.
-  // Spill ranges can always evict.
-  LiveRangeStage Stage = getStage(VirtReg);
-  if (Stage >= RS_Evicted && VirtReg.isSpillable())
-    return 0;
   NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
 
-  bool OnlyCheap = CostPerUseLimit != ~0u;
-
-  // Keep track of the lightest single interference seen so far.
-  // When scavenging for a cheap register, never consider evicting heavier
-  // ranges.
-  float BestWeight = OnlyCheap ? VirtReg.weight : HUGE_VALF;
+  // Keep track of the cheapest interference seen so far.
+  EvictionCost BestCost(~0u);
   unsigned BestPhys = 0;
 
+  // When we are just looking for a reduced cost per use, don't break any
+  // hints, and only evict smaller spill weights.
+  if (CostPerUseLimit < ~0u) {
+    BestCost.BrokenHints = 0;
+    BestCost.MaxWeight = VirtReg.weight;
+  }
+
   Order.rewind();
   while (unsigned PhysReg = Order.next()) {
-    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
+    if (clobberedByRegMask(PhysReg))
       continue;
-    // The first use of a register in a function has cost 1.
-    if (CostPerUseLimit == 1 && !MRI->isPhysRegUsed(PhysReg))
-      continue;
-
-    float Weight = BestWeight;
-    if (!canEvictInterference(VirtReg, PhysReg, Weight, OnlyCheap))
+    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
-
-    // This is an eviction candidate.
- DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = " - << Weight << '\n'); - if (BestPhys && Weight >= BestWeight) + // The first use of a callee-saved register in a function has cost 1. + // Don't start using a CSR when the CostPerUseLimit is low. + if (CostPerUseLimit == 1) + if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg)) + if (!MRI->isPhysRegUsed(CSR)) { + DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR " + << PrintReg(CSR, TRI) << '\n'); + continue; + } + + if (!canEvictInterference(VirtReg, PhysReg, false, BestCost)) continue; // Best so far. BestPhys = PhysReg; - BestWeight = Weight; + // Stop if the hint can be used. if (Order.isHint(PhysReg)) break; @@ -552,22 +664,7 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg, if (!BestPhys) return 0; - DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n"); - for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) { - LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI); - assert(Q.seenAllInterferences() && "Didn't check all interfererences."); - for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) { - LiveInterval *Intf = Q.interferingVRegs()[i]; - unassign(*Intf, VRM->getPhys(Intf->reg)); - ++NumEvicted; - NewVRegs.push_back(Intf); - // Prevent looping by marking the evicted ranges as RS_Evicted. - // When OnlyCheap is set, Intf is guaranteed to have a smaller spill - // weight which also prevents looping. - if (!OnlyCheap && getStage(*Intf) < RS_Evicted) - LRStage[Intf->reg] = RS_Evicted; - } - } + evictInterference(VirtReg, BestPhys, NewVRegs); return BestPhys; } @@ -596,6 +693,7 @@ bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, Intf.moveToBlock(BC.Number); BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare; BC.Exit = BI.LiveOut ? 
SpillPlacement::PrefReg : SpillPlacement::DontCare; + BC.ChangesValue = BI.FirstDef; if (!Intf.hasInterference()) continue; @@ -607,9 +705,9 @@ bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, if (BI.LiveIn) { if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) BC.Entry = SpillPlacement::MustSpill, ++Ins; - else if (Intf.first() < BI.FirstUse) + else if (Intf.first() < BI.FirstInstr) BC.Entry = SpillPlacement::PrefSpill, ++Ins; - else if (Intf.first() < BI.LastUse) + else if (Intf.first() < BI.LastInstr) ++Ins; } @@ -617,9 +715,9 @@ bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, if (BI.LiveOut) { if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) BC.Exit = SpillPlacement::MustSpill, ++Ins; - else if (Intf.last() > BI.LastUse) + else if (Intf.last() > BI.LastInstr) BC.Exit = SpillPlacement::PrefSpill, ++Ins; - else if (Intf.last() > BI.FirstUse) + else if (Intf.last() > BI.FirstInstr) ++Ins; } @@ -653,7 +751,7 @@ void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf, assert(T < GroupSize && "Array overflow"); TBS[T] = Number; if (++T == GroupSize) { - SpillPlacer->addLinks(ArrayRef(TBS, T)); + SpillPlacer->addLinks(makeArrayRef(TBS, T)); T = 0; } continue; @@ -683,11 +781,10 @@ void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf, ArrayRef Array(BCS, B); SpillPlacer->addConstraints(Array); - SpillPlacer->addLinks(ArrayRef(TBS, T)); + SpillPlacer->addLinks(makeArrayRef(TBS, T)); } -void RAGreedy::growRegion(GlobalSplitCandidate &Cand, - InterferenceCache::Cursor Intf) { +void RAGreedy::growRegion(GlobalSplitCandidate &Cand) { // Keep track of through blocks that have not been added to SpillPlacer. BitVector Todo = SA->getThroughBlocks(); SmallVectorImpl &ActiveBlocks = Cand.ActiveBlocks; @@ -698,8 +795,6 @@ void RAGreedy::growRegion(GlobalSplitCandidate &Cand, for (;;) { ArrayRef NewBundles = SpillPlacer->getRecentPositive(); - if (NewBundles.empty()) - break; // Find new through blocks in the periphery of PrefRegBundles. for (int i = 0, e = NewBundles.size(); i != e; ++i) { unsigned Bundle = NewBundles[i]; @@ -719,23 +814,75 @@ void RAGreedy::growRegion(GlobalSplitCandidate &Cand, } } // Any new blocks to add? - if (ActiveBlocks.size() > AddedTo) { - ArrayRef Add(&ActiveBlocks[AddedTo], - ActiveBlocks.size() - AddedTo); - addThroughConstraints(Intf, Add); - AddedTo = ActiveBlocks.size(); - } + if (ActiveBlocks.size() == AddedTo) + break; + + // Compute through constraints from the interference, or assume that all + // through blocks prefer spilling when forming compact regions. + ArrayRef NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo); + if (Cand.PhysReg) + addThroughConstraints(Cand.Intf, NewBlocks); + else + // Provide a strong negative bias on through blocks to prevent unwanted + // liveness on loop backedges. + SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true); + AddedTo = ActiveBlocks.size(); + // Perhaps iterating can enable more bundles? SpillPlacer->iterate(); } DEBUG(dbgs() << ", v=" << Visited); } +/// calcCompactRegion - Compute the set of edge bundles that should be live +/// when splitting the current live range into compact regions. Compact +/// regions can be computed without looking at interference. They are the +/// regions formed by removing all the live-through blocks from the live range. +/// +/// Returns false if the current live range is already compact, or if the +/// compact regions would form single block regions anyway. 
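+///
+/// Illustrative example (assumed here, not from the LLVM sources): a live
+/// range with uses in BB#1 and BB#4 that is merely live-through BB#2 and
+/// BB#3 loses both through-blocks, leaving the single-block regions {BB#1}
+/// and {BB#4}; no edge bundle stays live, so this function returns false
+/// and per-block splitting handles the range instead.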
+bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) { + // Without any through blocks, the live range is already compact. + if (!SA->getNumThroughBlocks()) + return false; + + // Compact regions don't correspond to any physreg. + Cand.reset(IntfCache, 0); + + DEBUG(dbgs() << "Compact region bundles"); + + // Use the spill placer to determine the live bundles. GrowRegion pretends + // that all the through blocks have interference when PhysReg is unset. + SpillPlacer->prepare(Cand.LiveBundles); + + // The static split cost will be zero since Cand.Intf reports no interference. + float Cost; + if (!addSplitConstraints(Cand.Intf, Cost)) { + DEBUG(dbgs() << ", none.\n"); + return false; + } + + growRegion(Cand); + SpillPlacer->finish(); + + if (!Cand.LiveBundles.any()) { + DEBUG(dbgs() << ", none.\n"); + return false; + } + + DEBUG({ + for (int i = Cand.LiveBundles.find_first(); i>=0; + i = Cand.LiveBundles.find_next(i)) + dbgs() << " EB#" << i; + dbgs() << ".\n"; + }); + return true; +} + /// calcSpillCost - Compute how expensive it would be to split the live range in /// SA around all use blocks instead of forming bundle regions. float RAGreedy::calcSpillCost() { float Cost = 0; - const LiveInterval &LI = SA->getParent(); ArrayRef UseBlocks = SA->getUseBlocks(); for (unsigned i = 0; i != UseBlocks.size(); ++i) { const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; @@ -744,16 +891,8 @@ float RAGreedy::calcSpillCost() { Cost += SpillPlacer->getBlockFrequency(Number); // Unless the value is redefined in the block. - if (BI.LiveIn && BI.LiveOut) { - SlotIndex Start, Stop; - tie(Start, Stop) = Indexes->getMBBRange(Number); - LiveInterval::const_iterator I = LI.find(Start); - assert(I != LI.end() && "Expected live-in value"); - // Is there a different live-out value? If so, we need an extra spill - // instruction. - if (I->end < Stop) - Cost += SpillPlacer->getBlockFrequency(Number); - } + if (BI.LiveIn && BI.LiveOut && BI.FirstDef) + Cost += SpillPlacer->getBlockFrequency(Number); } return Cost; } @@ -762,8 +901,7 @@ float RAGreedy::calcSpillCost() { /// pattern in LiveBundles. This cost should be added to the local cost of the /// interference pattern in SplitConstraints. /// -float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand, - InterferenceCache::Cursor Intf) { +float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) { float GlobalCost = 0; const BitVector &LiveBundles = Cand.LiveBundles; ArrayRef UseBlocks = SA->getUseBlocks(); @@ -790,8 +928,8 @@ float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand, continue; if (RegIn && RegOut) { // We need double spill code if this block has interference. - Intf.moveToBlock(Number); - if (Intf.hasInterference()) + Cand.Intf.moveToBlock(Number); + if (Cand.Intf.hasInterference()) GlobalCost += 2*SpillPlacer->getBlockFrequency(Number); continue; } @@ -801,238 +939,117 @@ float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand, return GlobalCost; } -/// splitAroundRegion - Split VirtReg around the region determined by -/// LiveBundles. Make an effort to avoid interference from PhysReg. +/// splitAroundRegion - Split the current live range around the regions +/// determined by BundleCand and GlobalCand. /// -/// The 'register' interval is going to contain as many uses as possible while -/// avoiding interference. The 'stack' interval is the complement constructed by -/// SplitEditor. It will contain the rest. 
+/// Before calling this function, GlobalCand and BundleCand must be initialized +/// so each bundle is assigned to a valid candidate, or NoCand for the +/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor +/// objects must be initialized for the current live range, and intervals +/// created for the used candidates. /// -void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, - GlobalSplitCandidate &Cand, - SmallVectorImpl &NewVRegs) { - const BitVector &LiveBundles = Cand.LiveBundles; - - DEBUG({ - dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI) - << " with bundles"; - for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i)) - dbgs() << " EB#" << i; - dbgs() << ".\n"; - }); - - InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg); - LiveRangeEdit LREdit(VirtReg, NewVRegs, this); - SE->reset(LREdit); - - // Create the main cross-block interval. - const unsigned MainIntv = SE->openIntv(); - - // First add all defs that are live out of a block. +/// @param LREdit The LiveRangeEdit object handling the current split. +/// @param UsedCands List of used GlobalCand entries. Every BundleCand value +/// must appear in this list. +void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit, + ArrayRef UsedCands) { + // These are the intervals created for new global ranges. We may create more + // intervals for local ranges. + const unsigned NumGlobalIntvs = LREdit.size(); + DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n"); + assert(NumGlobalIntvs && "No global intervals configured"); + + // Isolate even single instructions when dealing with a proper sub-class. + // That guarantees register class inflation for the stack interval because it + // is all copies. + unsigned Reg = SA->getParent().reg; + bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); + + // First handle all the blocks with uses. ArrayRef UseBlocks = SA->getUseBlocks(); for (unsigned i = 0; i != UseBlocks.size(); ++i) { const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; - bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)]; - bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)]; - - // Create separate intervals for isolated blocks with multiple uses. - if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) { - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); - SE->splitSingleBlock(BI); - SE->selectIntv(MainIntv); - continue; - } - - // Should the register be live out? - if (!BI.LiveOut || !RegOut) - continue; - - SlotIndex Start, Stop; - tie(Start, Stop) = Indexes->getMBBRange(BI.MBB); - Intf.moveToBlock(BI.MBB->getNumber()); - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#" - << Bundles->getBundle(BI.MBB->getNumber(), 1) - << " [" << Start << ';' - << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop - << ") intf [" << Intf.first() << ';' << Intf.last() << ')'); - - // The interference interval should either be invalid or overlap MBB. - assert((!Intf.hasInterference() || Intf.first() < Stop) - && "Bad interference"); - assert((!Intf.hasInterference() || Intf.last() > Start) - && "Bad interference"); - - // Check interference leaving the block. - if (!Intf.hasInterference()) { - // Block is interference-free. 
- DEBUG(dbgs() << ", no interference"); - if (!BI.LiveThrough) { - DEBUG(dbgs() << ", not live-through.\n"); - SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop); - continue; + unsigned Number = BI.MBB->getNumber(); + unsigned IntvIn = 0, IntvOut = 0; + SlotIndex IntfIn, IntfOut; + if (BI.LiveIn) { + unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; + if (CandIn != NoCand) { + GlobalSplitCandidate &Cand = GlobalCand[CandIn]; + IntvIn = Cand.IntvIdx; + Cand.Intf.moveToBlock(Number); + IntfIn = Cand.Intf.first(); } - if (!RegIn) { - // Block is live-through, but entry bundle is on the stack. - // Reload just before the first use. - DEBUG(dbgs() << ", not live-in, enter before first use.\n"); - SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop); - continue; + } + if (BI.LiveOut) { + unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; + if (CandOut != NoCand) { + GlobalSplitCandidate &Cand = GlobalCand[CandOut]; + IntvOut = Cand.IntvIdx; + Cand.Intf.moveToBlock(Number); + IntfOut = Cand.Intf.last(); } - DEBUG(dbgs() << ", live-through.\n"); - continue; } - // Block has interference. - DEBUG(dbgs() << ", interference to " << Intf.last()); - - if (!BI.LiveThrough && Intf.last() <= BI.FirstUse) { - // The interference doesn't reach the outgoing segment. - DEBUG(dbgs() << " doesn't affect def from " << BI.FirstUse << '\n'); - SE->useIntv(BI.FirstUse, Stop); + // Create separate intervals for isolated blocks with multiple uses. + if (!IntvIn && !IntvOut) { + DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); + if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) + SE->splitSingleBlock(BI); continue; } - SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber()); - if (Intf.last().getBoundaryIndex() < BI.LastUse) { - // There are interference-free uses at the end of the block. - // Find the first use that can get the live-out register. - SmallVectorImpl::const_iterator UI = - std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(), - Intf.last().getBoundaryIndex()); - assert(UI != SA->UseSlots.end() && "Couldn't find last use"); - SlotIndex Use = *UI; - assert(Use <= BI.LastUse && "Couldn't find last use"); - // Only attempt a split befroe the last split point. - if (Use.getBaseIndex() <= LastSplitPoint) { - DEBUG(dbgs() << ", free use at " << Use << ".\n"); - SlotIndex SegStart = SE->enterIntvBefore(Use); - assert(SegStart >= Intf.last() && "Couldn't avoid interference"); - assert(SegStart < LastSplitPoint && "Impossible split point"); - SE->useIntv(SegStart, Stop); - continue; - } - } - - // Interference is after the last use. - DEBUG(dbgs() << " after last use.\n"); - SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB); - assert(SegStart >= Intf.last() && "Couldn't avoid interference"); + if (IntvIn && IntvOut) + SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); + else if (IntvIn) + SE->splitRegInBlock(BI, IntvIn, IntfIn); + else + SE->splitRegOutBlock(BI, IntvOut, IntfOut); } - // Now all defs leading to live bundles are handled, do everything else. - for (unsigned i = 0; i != UseBlocks.size(); ++i) { - const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; - bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)]; - bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)]; - - // Is the register live-in? - if (!BI.LiveIn || !RegIn) - continue; - - // We have an incoming register. Check for interference. 
- SlotIndex Start, Stop; - tie(Start, Stop) = Indexes->getMBBRange(BI.MBB); - Intf.moveToBlock(BI.MBB->getNumber()); - DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0) - << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';' - << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop - << ')'); - - // Check interference entering the block. - if (!Intf.hasInterference()) { - // Block is interference-free. - DEBUG(dbgs() << ", no interference"); - if (!BI.LiveThrough) { - DEBUG(dbgs() << ", killed in block.\n"); - SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse)); - continue; - } - if (!RegOut) { - SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber()); - // Block is live-through, but exit bundle is on the stack. - // Spill immediately after the last use. - if (BI.LastUse < LastSplitPoint) { - DEBUG(dbgs() << ", uses, stack-out.\n"); - SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse)); - continue; - } - // The last use is after the last split point, it is probably an - // indirect jump. - DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point " - << LastSplitPoint << ", stack-out.\n"); - SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint); - SE->useIntv(Start, SegEnd); - // Run a double interval from the split to the last use. - // This makes it possible to spill the complement without affecting the - // indirect branch. - SE->overlapIntv(SegEnd, BI.LastUse); + // Handle live-through blocks. The relevant live-through blocks are stored in + // the ActiveBlocks list with each candidate. We need to filter out + // duplicates. + BitVector Todo = SA->getThroughBlocks(); + for (unsigned c = 0; c != UsedCands.size(); ++c) { + ArrayRef Blocks = GlobalCand[UsedCands[c]].ActiveBlocks; + for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { + unsigned Number = Blocks[i]; + if (!Todo.test(Number)) continue; - } - // Register is live-through. - DEBUG(dbgs() << ", uses, live-through.\n"); - SE->useIntv(Start, Stop); - continue; - } - - // Block has interference. - DEBUG(dbgs() << ", interference from " << Intf.first()); - - if (!BI.LiveThrough && Intf.first() >= BI.LastUse) { - // The interference doesn't reach the outgoing segment. - DEBUG(dbgs() << " doesn't affect kill at " << BI.LastUse << '\n'); - SE->useIntv(Start, BI.LastUse); - continue; - } + Todo.reset(Number); - if (Intf.first().getBaseIndex() > BI.FirstUse) { - // There are interference-free uses at the beginning of the block. - // Find the last use that can get the register. - SmallVectorImpl::const_iterator UI = - std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(), - Intf.first().getBaseIndex()); - assert(UI != SA->UseSlots.begin() && "Couldn't find first use"); - SlotIndex Use = (--UI)->getBoundaryIndex(); - DEBUG(dbgs() << ", free use at " << *UI << ".\n"); - SlotIndex SegEnd = SE->leaveIntvAfter(Use); - assert(SegEnd <= Intf.first() && "Couldn't avoid interference"); - SE->useIntv(Start, SegEnd); - continue; - } + unsigned IntvIn = 0, IntvOut = 0; + SlotIndex IntfIn, IntfOut; - // Interference is before the first use. - DEBUG(dbgs() << " before first use.\n"); - SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB); - assert(SegEnd <= Intf.first() && "Couldn't avoid interference"); - } + unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; + if (CandIn != NoCand) { + GlobalSplitCandidate &Cand = GlobalCand[CandIn]; + IntvIn = Cand.IntvIdx; + Cand.Intf.moveToBlock(Number); + IntfIn = Cand.Intf.first(); + } - // Handle live-through blocks. 
- for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) { - unsigned Number = Cand.ActiveBlocks[i]; - bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; - bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; - DEBUG(dbgs() << "Live through BB#" << Number << '\n'); - if (RegIn && RegOut) { - Intf.moveToBlock(Number); - if (!Intf.hasInterference()) { - SE->useIntv(Indexes->getMBBStartIdx(Number), - Indexes->getMBBEndIdx(Number)); - continue; + unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; + if (CandOut != NoCand) { + GlobalSplitCandidate &Cand = GlobalCand[CandOut]; + IntvOut = Cand.IntvIdx; + Cand.Intf.moveToBlock(Number); + IntfOut = Cand.Intf.last(); } + if (!IntvIn && !IntvOut) + continue; + SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); } - MachineBasicBlock *MBB = MF->getBlockNumbered(Number); - if (RegIn) - SE->leaveIntvAtTop(*MBB); - if (RegOut) - SE->enterIntvAtEnd(*MBB); } ++NumGlobalSplits; SmallVector IntvMap; SE->finish(&IntvMap); - DebugVars->splitRegister(VirtReg.reg, LREdit.regs()); + DebugVars->splitRegister(Reg, LREdit.regs()); - LRStage.resize(MRI->getNumVirtRegs()); + ExtraRegInfo.resize(MRI->getNumVirtRegs()); unsigned OrigBlocks = SA->getNumLiveBlocks(); // Sort out the new intervals created by splitting. We get four kinds: @@ -1041,27 +1058,27 @@ void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, // - Block-local splits are candidates for local splitting. // - DCE leftovers should go back on the queue. for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { - unsigned Reg = LREdit.get(i)->reg; + LiveInterval &Reg = *LREdit.get(i); // Ignore old intervals from DCE. - if (LRStage[Reg] != RS_New) + if (getStage(Reg) != RS_New) continue; // Remainder interval. Don't try splitting again, spill if it doesn't // allocate. if (IntvMap[i] == 0) { - LRStage[Reg] = RS_Global; + setStage(Reg, RS_Spill); continue; } - // Main interval. Allow repeated splitting as long as the number of live + // Global intervals. Allow repeated splitting as long as the number of live // blocks is strictly decreasing. - if (IntvMap[i] == MainIntv) { - if (SA->countLiveBlocks(LREdit.get(i)) >= OrigBlocks) { + if (IntvMap[i] < NumGlobalIntvs) { + if (SA->countLiveBlocks(&Reg) >= OrigBlocks) { DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks << " blocks as original.\n"); // Don't allow repeated splitting as a safe guard against looping. - LRStage[Reg] = RS_Global; + setStage(Reg, RS_Split2); } continue; } @@ -1076,21 +1093,52 @@ void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, SmallVectorImpl &NewVRegs) { - float BestCost = Hysteresis * calcSpillCost(); - DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n'); - const unsigned NoCand = ~0u; + unsigned NumCands = 0; unsigned BestCand = NoCand; + float BestCost; + SmallVector UsedCands; + + // Check if we can split this live range around a compact region. + bool HasCompact = calcCompactRegion(GlobalCand.front()); + if (HasCompact) { + // Yes, keep GlobalCand[0] as the compact region candidate. + NumCands = 1; + BestCost = HUGE_VALF; + } else { + // No benefit from the compact region, our fallback will be per-block + // splitting. Make sure we find a solution that is cheaper than spilling. 
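+    // (Illustrative note: Hysteresis is a constant slightly below 1.0,
+    // defined earlier in this file, so with a spill cost of 100 only split
+    // solutions costing less than roughly 98 qualify at all; the same factor
+    // is applied when a new best candidate is recorded, so later candidates
+    // must clearly beat the incumbent rather than win on rounding noise.)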
+ BestCost = Hysteresis * calcSpillCost(); + DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n'); + } Order.rewind(); - for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) { - if (GlobalCand.size() <= Cand) - GlobalCand.resize(Cand+1); - GlobalCand[Cand].reset(PhysReg); + while (unsigned PhysReg = Order.next()) { + // Discard bad candidates before we run out of interference cache cursors. + // This will only affect register classes with a lot of registers (>32). + if (NumCands == IntfCache.getMaxCursors()) { + unsigned WorstCount = ~0u; + unsigned Worst = 0; + for (unsigned i = 0; i != NumCands; ++i) { + if (i == BestCand || !GlobalCand[i].PhysReg) + continue; + unsigned Count = GlobalCand[i].LiveBundles.count(); + if (Count < WorstCount) + Worst = i, WorstCount = Count; + } + --NumCands; + GlobalCand[Worst] = GlobalCand[NumCands]; + if (BestCand == NumCands) + BestCand = Worst; + } + + if (GlobalCand.size() <= NumCands) + GlobalCand.resize(NumCands+1); + GlobalSplitCandidate &Cand = GlobalCand[NumCands]; + Cand.reset(IntfCache, PhysReg); - SpillPlacer->prepare(GlobalCand[Cand].LiveBundles); + SpillPlacer->prepare(Cand.LiveBundles); float Cost; - InterferenceCache::Cursor Intf(IntfCache, PhysReg); - if (!addSplitConstraints(Intf, Cost)) { + if (!addSplitConstraints(Cand.Intf, Cost)) { DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n"); continue; } @@ -1105,38 +1153,118 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, }); continue; } - growRegion(GlobalCand[Cand], Intf); + growRegion(Cand); SpillPlacer->finish(); // No live bundles, defer to splitSingleBlocks(). - if (!GlobalCand[Cand].LiveBundles.any()) { + if (!Cand.LiveBundles.any()) { DEBUG(dbgs() << " no bundles.\n"); continue; } - Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf); + Cost += calcGlobalSplitCost(Cand); DEBUG({ dbgs() << ", total = " << Cost << " with bundles"; - for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0; - i = GlobalCand[Cand].LiveBundles.find_next(i)) + for (int i = Cand.LiveBundles.find_first(); i>=0; + i = Cand.LiveBundles.find_next(i)) dbgs() << " EB#" << i; dbgs() << ".\n"; }); if (Cost < BestCost) { - BestCand = Cand; + BestCand = NumCands; BestCost = Hysteresis * Cost; // Prevent rounding effects. } + ++NumCands; } - if (BestCand == NoCand) + // No solutions found, fall back to single block splitting. + if (!HasCompact && BestCand == NoCand) return 0; - splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs); + // Prepare split editor. + LiveRangeEdit LREdit(VirtReg, NewVRegs, this); + SE->reset(LREdit, SplitSpillMode); + + // Assign all edge bundles to the preferred candidate, or NoCand. + BundleCand.assign(Bundles->getNumBundles(), NoCand); + + // Assign bundles for the best candidate region. + if (BestCand != NoCand) { + GlobalSplitCandidate &Cand = GlobalCand[BestCand]; + if (unsigned B = Cand.getBundles(BundleCand, BestCand)) { + UsedCands.push_back(BestCand); + Cand.IntvIdx = SE->openIntv(); + DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in " + << B << " bundles, intv " << Cand.IntvIdx << ".\n"); + (void)B; + } + } + + // Assign bundles for the compact region. 
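+  // (Illustrative example, not from the LLVM sources: once the best-candidate
+  // assignment above and the compact-region assignment below have run,
+  // BundleCand might read [0, 2, 2, NoCand, 0]: bundles 0 and 4 map to the
+  // compact region in GlobalCand[0], bundles 1 and 2 to the best candidate
+  // GlobalCand[2], and bundle 3 stays on the stack.)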
+ if (HasCompact) { + GlobalSplitCandidate &Cand = GlobalCand.front(); + assert(!Cand.PhysReg && "Compact region has no physreg"); + if (unsigned B = Cand.getBundles(BundleCand, 0)) { + UsedCands.push_back(0); + Cand.IntvIdx = SE->openIntv(); + DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv " + << Cand.IntvIdx << ".\n"); + (void)B; + } + } + + splitAroundRegion(LREdit, UsedCands); return 0; } +//===----------------------------------------------------------------------===// +// Per-Block Splitting +//===----------------------------------------------------------------------===// + +/// tryBlockSplit - Split a global live range around every block with uses. This +/// creates a lot of local live ranges, that will be split by tryLocalSplit if +/// they don't allocate. +unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order, + SmallVectorImpl &NewVRegs) { + assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed"); + unsigned Reg = VirtReg.reg; + bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); + LiveRangeEdit LREdit(VirtReg, NewVRegs, this); + SE->reset(LREdit, SplitSpillMode); + ArrayRef UseBlocks = SA->getUseBlocks(); + for (unsigned i = 0; i != UseBlocks.size(); ++i) { + const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; + if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) + SE->splitSingleBlock(BI); + } + // No blocks were split. + if (LREdit.empty()) + return 0; + + // We did split for some blocks. + SmallVector IntvMap; + SE->finish(&IntvMap); + + // Tell LiveDebugVariables about the new ranges. + DebugVars->splitRegister(Reg, LREdit.regs()); + + ExtraRegInfo.resize(MRI->getNumVirtRegs()); + + // Sort out the new intervals created by splitting. The remainder interval + // goes straight to spilling, the new local ranges get to stay RS_New. + for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { + LiveInterval &LI = *LREdit.get(i); + if (getStage(LI) == RS_New && IntvMap[i] == 0) + setStage(LI, RS_Spill); + } + + if (VerifyEnabled) + MF->verify(this, "After splitting live range around basic blocks"); + return 0; +} + //===----------------------------------------------------------------------===// // Local Splitting //===----------------------------------------------------------------------===// @@ -1151,29 +1279,31 @@ void RAGreedy::calcGapWeights(unsigned PhysReg, SmallVectorImpl &GapWeight) { assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); - const SmallVectorImpl &Uses = SA->UseSlots; + ArrayRef Uses = SA->getUseSlots(); const unsigned NumGaps = Uses.size()-1; // Start and end points for the interference check. - SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse; - SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse; + SlotIndex StartIdx = + BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr; + SlotIndex StopIdx = + BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr; GapWeight.assign(NumGaps, 0.0f); // Add interference from each overlapping register. - for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) { + for (const uint16_t *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) { if (!query(const_cast(SA->getParent()), *AI) .checkInterference()) continue; - // We know that VirtReg is a continuous interval from FirstUse to LastUse, - // so we don't need InterferenceQuery. 
+ // We know that VirtReg is a continuous interval from FirstInstr to + // LastInstr, so we don't need InterferenceQuery. // // Interference that overlaps an instruction is counted in both gaps // surrounding the instruction. The exception is interference before // StartIdx and after StopIdx. // - LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx); + LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx); for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) { // Skip the gaps before IntI. while (Uses[Gap+1].getBoundaryIndex() < IntI.start()) @@ -1195,47 +1325,6 @@ void RAGreedy::calcGapWeights(unsigned PhysReg, } } -/// getPrevMappedIndex - Return the slot index of the last non-copy instruction -/// before MI that has a slot index. If MI is the first mapped instruction in -/// its block, return the block start index instead. -/// -SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) { - assert(MI && "Missing MachineInstr"); - const MachineBasicBlock *MBB = MI->getParent(); - MachineBasicBlock::const_iterator B = MBB->begin(), I = MI; - while (I != B) - if (!(--I)->isDebugValue() && !I->isCopy()) - return Indexes->getInstructionIndex(I); - return Indexes->getMBBStartIdx(MBB); -} - -/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous -/// real non-copy instruction for each instruction in SA->UseSlots. -/// -void RAGreedy::calcPrevSlots() { - const SmallVectorImpl &Uses = SA->UseSlots; - PrevSlot.clear(); - PrevSlot.reserve(Uses.size()); - for (unsigned i = 0, e = Uses.size(); i != e; ++i) { - const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]); - PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex()); - } -} - -/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may -/// be beneficial to split before UseSlots[i]. -/// -/// 0 is always a valid split point -unsigned RAGreedy::nextSplitPoint(unsigned i) { - const SmallVectorImpl &Uses = SA->UseSlots; - const unsigned Size = Uses.size(); - assert(i != Size && "No split points after the end"); - // Allow split before i when Uses[i] is not adjacent to the previous use. - while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex()) - ; - return i; -} - /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only /// basic block. /// @@ -1248,10 +1337,10 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order, // while only covering a single block - A phi-def can use undef values from // predecessors, and the block could be a single-block loop. // We don't bother doing anything clever about such a case, we simply assume - // that the interval is continuous from FirstUse to LastUse. We should make - // sure that we don't do anything illegal to such an interval, though. + // that the interval is continuous from FirstInstr to LastInstr. We should + // make sure that we don't do anything illegal to such an interval, though. - const SmallVectorImpl &Uses = SA->UseSlots; + ArrayRef Uses = SA->getUseSlots(); if (Uses.size() <= 2) return 0; const unsigned NumGaps = Uses.size()-1; @@ -1259,15 +1348,61 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order, DEBUG({ dbgs() << "tryLocalSplit: "; for (unsigned i = 0, e = Uses.size(); i != e; ++i) - dbgs() << ' ' << SA->UseSlots[i]; + dbgs() << ' ' << Uses[i]; dbgs() << '\n'; }); - // For every use, find the previous mapped non-copy instruction. 
-  // We use this to detect valid split points, and to estimate new interval
-  // sizes.
-  calcPrevSlots();
+  // If VirtReg is live across any register mask operands, compute a list of
+  // gaps with register masks.
+  SmallVector<unsigned, 8> RegMaskGaps;
+  if (!UsableRegs.empty()) {
+    // Get regmask slots for the whole block.
+    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
+    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
+    // Constrain to VirtReg's live range.
+    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
+                                   Uses.front().getRegSlot()) - RMS.begin();
+    unsigned re = RMS.size();
+    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
+      // Look for Uses[i] <= RMS <= Uses[i+1].
+      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
+      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
+        continue;
+      // Skip a regmask on the same instruction as the last use. It doesn't
+      // overlap the live range.
+      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
+        break;
+      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
+      RegMaskGaps.push_back(i);
+      // Advance ri to the next gap. A regmask on one of the uses counts in
+      // both gaps.
+      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
+        ++ri;
+    }
+    DEBUG(dbgs() << '\n');
+  }
 
+  // Since we allow local split results to be split again, there is a risk of
+  // creating infinite loops. It is tempting to require that the new live
+  // ranges have fewer instructions than the original. That would guarantee
+  // convergence, but it is too strict. A live range with 3 instructions can be
+  // split 2+3 (including the COPY), and we want to allow that.
+  //
+  // Instead we use these rules:
+  //
+  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
+  //    noop split, of course).
+  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
+  //    the new ranges must have fewer instructions than before the split.
+  // 3. New ranges with the same number of instructions are marked RS_Split2,
+  //    smaller ranges are marked RS_New.
+  //
+  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
+  // excessive splitting and infinite loops.
+  //
+  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
+
+  // Best split candidate.
   unsigned BestBefore = NumGaps;
   unsigned BestAfter = 0;
   float BestDiff = 0;
@@ -1281,17 +1416,20 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
     // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
     calcGapWeights(PhysReg, GapWeight);
 
+    // Remove any gaps with regmask clobbers.
+    if (clobberedByRegMask(PhysReg))
+      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
+        GapWeight[RegMaskGaps[i]] = HUGE_VALF;
+
     // Try to find the best sequence of gaps to close.
     // The new spill weight must be larger than any gap interference.
 
     // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
-    unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
+    unsigned SplitBefore = 0, SplitAfter = 1;
 
     // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
     // It is the spill weight that needs to be evicted.
     float MaxGap = GapWeight[0];
-    for (unsigned i = 1; i != SplitAfter; ++i)
-      MaxGap = std::max(MaxGap, GapWeight[i]);
 
     for (;;) {
       // Live before/after split?
@@ -1309,32 +1447,22 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
       }
 
       // Should the interval be extended or shrunk?
       bool Shrink = true;
-      if (MaxGap < HUGE_VALF) {
-        // Estimate the new spill weight.
-        //
-        // Each instruction reads and writes the register, except the first
-        // instr doesn't read when !FirstLive, and the last instr doesn't write
-        // when !LastLive.
-        //
-        // We will be inserting copies before and after, so the total number of
-        // reads and writes is 2 * EstUses.
-        //
-        const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
-                                 2*(LiveBefore + LiveAfter);
-        // Try to guess the size of the new interval. This should be trivial,
-        // but the slot index of an inserted copy can be a lot smaller than the
-        // instruction it is inserted before if there are many dead indexes
-        // between them.
-        //
-        // We measure the distance from the instruction before SplitBefore to
-        // get a conservative estimate.
-        //
-        // The final distance can still be different if inserting copies
-        // triggers a slot index renumbering.
+      // How many gaps would the new range have?
+      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
+
+      // Legally, without causing looping?
+      bool Legal = !ProgressRequired || NewGaps < NumGaps;
+
+      if (Legal && MaxGap < HUGE_VALF) {
+        // Estimate the new spill weight. Each instruction reads or writes the
+        // register. Conservatively assume there are no read-modify-write
+        // instructions.
         //
-        const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
-                                 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
+        // Try to guess the size of the new interval.
+        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
+                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
+                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
         // Would this split be possible to allocate?
         // Never allocate all gaps; we wouldn't be making progress.
         DEBUG(dbgs() << " w=" << EstWeight);
@@ -1352,8 +1480,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
 
       // Try to shrink.
       if (Shrink) {
-        SplitBefore = nextSplitPoint(SplitBefore);
-        if (SplitBefore < SplitAfter) {
+        if (++SplitBefore < SplitAfter) {
           DEBUG(dbgs() << " shrink\n");
           // Recompute the max when necessary.
           if (GapWeight[SplitBefore - 1] >= MaxGap) {
@@ -1373,10 +1500,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
       }
 
       DEBUG(dbgs() << " extend\n");
-      for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
-           SplitAfter != e; ++SplitAfter)
-        MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
-      continue;
+      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
     }
   }
 
@@ -1395,9 +1519,26 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
   SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
   SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
   SE->useIntv(SegStart, SegStop);
-  SE->finish();
+  SmallVector<unsigned, 8> IntvMap;
+  SE->finish(&IntvMap);
   DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
-  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
+
+  // If the new range has the same number of instructions as before, mark it as
+  // RS_Split2 so the next split will be forced to make progress. Otherwise,
+  // leave the new intervals as RS_New so they can compete.
+  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
+  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
+  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
+  if (NewGaps >= NumGaps) {
+    DEBUG(dbgs() << "Tagging non-progress ranges: ");
+    assert(!ProgressRequired && "Didn't make progress when it was required.");
+    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
+      if (IntvMap[i] == 1) {
+        setStage(*LREdit.get(i), RS_Split2);
+        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
+      }
+    DEBUG(dbgs() << '\n');
+  }
   ++NumLocalSplits;
 
   return 0;
@@ -1412,6 +1553,10 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
+  // Ranges must be Split2 or less.
+  if (getStage(VirtReg) >= RS_Spill)
+    return 0;
+
   // Local intervals are handled separately.
   if (LIS->intervalIsInOneMBB(VirtReg)) {
     NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
@@ -1421,11 +1566,6 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
   NamedRegionTimer T("Global Splitting", TimerGroupName,
                      TimePassesIsEnabled);
 
-  // Don't iterate global splitting.
-  // Move straight to spilling if this range was produced by a global split.
-  if (getStage(VirtReg) >= RS_Global)
-    return 0;
-
   SA->analyze(&VirtReg);
 
   // FIXME: SplitAnalysis may repair broken live ranges coming from the
@@ -1439,24 +1579,17 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
     return PhysReg;
   }
 
-  // First try to split around a region spanning multiple blocks.
-  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
-  if (PhysReg || !NewVRegs.empty())
-    return PhysReg;
-
-  // Then isolate blocks with multiple uses.
-  SplitAnalysis::BlockPtrSet Blocks;
-  if (SA->getMultiUseBlocks(Blocks)) {
-    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
-    SE->reset(LREdit);
-    SE->splitSingleBlocks(Blocks);
-    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
-    if (VerifyEnabled)
-      MF->verify(this, "After splitting live range around basic blocks");
+  // First try to split around a region spanning multiple blocks. RS_Split2
+  // ranges already made dubious progress with region splitting, so they go
+  // straight to single block splitting.
+  if (getStage(VirtReg) < RS_Split2) {
+    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
+    if (PhysReg || !NewVRegs.empty())
+      return PhysReg;
   }
 
-  // Don't assign any physregs.
-  return 0;
+  // Then isolate blocks.
+  return tryBlockSplit(VirtReg, Order, NewVRegs);
 }
 
 
@@ -1466,24 +1599,34 @@
 
 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
+  // Check if VirtReg is live across any calls.
+  UsableRegs.clear();
+  if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
+    DEBUG(dbgs() << "Live across regmasks.\n");
+
   // First try assigning a free register.
-  AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
+  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
   if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
     return PhysReg;
 
   LiveRangeStage Stage = getStage(VirtReg);
-  DEBUG(dbgs() << StageName[Stage] << '\n');
-
-  if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
-    return PhysReg;
+  DEBUG(dbgs() << StageName[Stage]
+               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
+
+  // Try to evict a less worthy live range, but only for ranges from the
+  // primary queue. The RS_Split ranges already failed to do this, and they
+  // should not get a second chance until they have been split.
+  if (Stage != RS_Split)
+    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
+      return PhysReg;
 
   assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
 
   // The first time we see a live range, don't try to split or spill.
   // Wait until the second time, when all smaller ranges have been allocated.
   // This gives a better picture of the interference to split around.
-  if (Stage == RS_First) {
-    LRStage[VirtReg.reg] = RS_Second;
+  if (Stage < RS_Split) {
+    setStage(VirtReg, RS_Split);
     DEBUG(dbgs() << "wait for second round\n");
     NewVRegs.push_back(&VirtReg);
     return 0;
@@ -1491,7 +1634,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
 
   // If we couldn't allocate a register from spilling, there is probably some
   // invalid inline assembly. The base class will report it.
-  if (Stage >= RS_Spill)
+  if (Stage >= RS_Done || !VirtReg.isSpillable())
     return ~0u;
 
   // Try splitting VirtReg or interferences.
@@ -1503,7 +1646,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
   NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
   LiveRangeEdit LRE(VirtReg, NewVRegs, this);
   spiller().spill(LRE);
-  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
+  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
 
   if (VerifyEnabled)
     MF->verify(this, "After spilling");
@@ -1525,19 +1668,19 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
   RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
   Indexes = &getAnalysis<SlotIndexes>();
   DomTree = &getAnalysis<MachineDominatorTree>();
-  ReservedRegs = TRI->getReservedRegs(*MF);
   SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
   Loops = &getAnalysis<MachineLoopInfo>();
-  LoopRanges = &getAnalysis<MachineLoopRanges>();
   Bundles = &getAnalysis<EdgeBundles>();
   SpillPlacer = &getAnalysis<SpillPlacement>();
   DebugVars = &getAnalysis<LiveDebugVariables>();
 
   SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
   SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
-  LRStage.clear();
-  LRStage.resize(MRI->getNumVirtRegs());
-  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
+  ExtraRegInfo.clear();
+  ExtraRegInfo.resize(MRI->getNumVirtRegs());
+  NextCascade = 1;
+  IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
+  GlobalCand.resize(32);  // This will grow as needed.
 
   allocatePhysRegs();
   addMBBLiveIns(MF);
@@ -1550,9 +1693,15 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
   }
 
   // Write out new DBG_VALUE instructions.
-  DebugVars->emitDebugValues(VRM);
+  {
+    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
+    DebugVars->emitDebugValues(VRM);
+  }
 
-  // The pass output is in VirtRegMap. Release all the transient data.
+  // All machine operands and other references to virtual registers have been
+  // replaced. Remove the virtual registers and release all the transient data.
+  VRM->clearAllVirt();
+  MRI->clearVirtRegs();
   releaseMemory();
   return true;
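
The RS_Split2 progress rule in the tryLocalSplit hunks above reduces to simple gap arithmetic. The stand-alone sketch below makes it concrete; it is an editorial illustration, and newGaps, the sample values, and the main driver are hypothetical stand-ins, not LLVM interfaces:

// A minimal sketch of the gap arithmetic behind the RS_Split2 progress rule.
// All names here are simplified stand-ins for illustration only.
#include <cstdio>

// Gaps the new range keeps: one per use-to-use gap inside the split window,
// plus one on each side where a copy connects it to the rest of the range.
static unsigned newGaps(bool LiveBefore, unsigned SplitBefore,
                        unsigned SplitAfter, bool LiveAfter) {
  return LiveBefore + (SplitAfter - SplitBefore) + LiveAfter;
}

int main() {
  const unsigned NumGaps = 3; // a local range with 4 uses has 3 gaps

  // Split around the two middle uses of a live-through range. Copies are
  // needed on both edges, so the product keeps 1 + 1 + 1 = 3 gaps.
  unsigned NG = newGaps(true, 1, 2, true);

  // Same gap count as the original: no progress was made, so the product
  // would be tagged RS_Split2 and the next split round must shrink it.
  std::printf("NewGaps=%u -> %s\n", NG,
              NG >= NumGaps ? "RS_Split2" : "RS_New");
  return 0;
}

This is exactly the 3 -> 2+3 case the convergence comment allows once: the first split may stand still, but an RS_Split2 range must then produce strictly smaller pieces.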
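The RegMaskGaps scan added to tryLocalSplit can be exercised the same way. In this simplified sketch, plain integers stand in for SlotIndex values and the isEarlierInstr/isSameInstr comparisons are reduced to operator<; it mirrors the shape of the two-pointer walk but is not the pass's actual code:

// Collect the indices of use-to-use gaps that contain a register mask
// (e.g. a call). Integers stand in for SlotIndex values (illustration only).
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Uses = {4, 12, 20, 36}; // sorted use slots; 3 gaps
  std::vector<int> RMS = {1, 16, 20};      // sorted regmask slots
  const unsigned NumGaps = Uses.size() - 1;

  // Constrain to the live range: skip regmasks before the first use.
  unsigned ri =
      std::lower_bound(RMS.begin(), RMS.end(), Uses.front()) - RMS.begin();
  std::vector<unsigned> RegMaskGaps;
  for (unsigned i = 0; i != NumGaps && ri != RMS.size(); ++i) {
    if (Uses[i + 1] < RMS[ri])
      continue; // the next regmask lies beyond this gap
    RegMaskGaps.push_back(i);
    // A regmask exactly on a use counts in both surrounding gaps, so only
    // advance past regmasks strictly inside the current gap.
    while (ri != RMS.size() && RMS[ri] < Uses[i + 1])
      ++ri;
  }

  for (unsigned g : RegMaskGaps)
    std::printf("gap %u clobbered\n", g); // prints gaps 1 and 2
  return 0;
}

Note how the regmask at slot 20, which coincides with a use, lands in both gap 1 and gap 2; those gaps then get HUGE_VALF weight for any PhysReg the mask clobbers, so no split candidate will try to stay live across the call in that register.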