//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
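
  // Note on ordering: EvictionCost compares lexicographically, so any
  // difference in broken hints dominates spill weight. For example,
  // {BrokenHints=0, MaxWeight=100.0} is considered cheaper than
  // {BrokenHints=1, MaxWeight=0.5}.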

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    InterferenceCache::Cursor Intf;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. Ensure that the new register gets
  // the same stage as the parent.
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  } else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}
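
// Priority encoding example: a fresh, unhinted range of size 100 gets
// Prio = (1u << 31) + 100, and a physreg hint additionally sets bit 30.
// Deferred RS_Split ranges use (1u << 31) - Size, so they sort below every
// fresh range, and the longest deferred ranges are dequeued last, once the
// surrounding interference is realistic.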

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}
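
// Note: getCostPerUse is nonzero for registers that are more expensive to use
// on some targets (longer encodings, for example), so before settling for a
// costly register we check whether eviction can free up a cheaper one.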

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}
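
// Cascade example: a live range whose Cascade is 3 may evict interference
// with cascade numbers 0-2, but nothing at 3 or above. Evicted ranges are
// stamped with the evictor's cascade number in evictInterference(), so every
// chain of evictions uses strictly increasing numbers and must terminate.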

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastUse)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstUse)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
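
// Accounting example: a block where interference starts after the block entry
// but before the first use gets BC.Entry = PrefSpill and Ins = 1, so it adds
// one unit of its block frequency to the static cost; interference reaching
// all the way back to the block entry upgrades that to MustSpill.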

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}
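
// The fixed GroupSize above batches constraints and links so SpillPlacer can
// process them in chunks of eight; the two trailing calls flush whatever is
// left in a partially filled group.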

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      SpillPlacer->addPrefSpill(NewBlocks);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
      if (I->end < Stop)
        Cost += SpillPlacer->getBlockFrequency(Number);
    }
  }
  return Cost;
}
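
// Worked example: a use block with frequency 10 normally contributes 10 to
// the spill cost (one reload or one store). If the range is live-in and
// live-out but the incoming value dies inside the block, the value was
// redefined there, and both a reload and a store are needed: 20.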

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
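
// Cost model: each use block pays its frequency once per boundary where the
// chosen bundle (register vs. stack) disagrees with the recorded preference.
// Live-through blocks that keep the value in a register across interference
// pay twice their frequency; entering or leaving on the stack pays it once.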

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor &Intf = Cand.Intf;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = BI.LiveIn &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = BI.LiveOut &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (!BI.isOneInstr()) {
        SE->splitSingleBlock(BI);
        SE->selectIntv(MainIntv);
      }
      continue;
    }

    Intf.moveToBlock(BI.MBB->getNumber());

    if (RegIn && RegOut)
      SE->splitLiveThroughBlock(BI.MBB->getNumber(),
                                MainIntv, Intf.first(),
                                MainIntv, Intf.last());
    else if (RegIn)
      SE->splitRegInBlock(BI, MainIntv, Intf.first());
    else
      SE->splitRegOutBlock(BI, MainIntv, Intf.last());
  }

  // Handle live-through blocks.
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    Intf.moveToBlock(Number);
    SE->splitLiveThroughBlock(Number, RegIn  ? MainIntv : 0, Intf.first(),
                                      RegOut ? MainIntv : 0, Intf.last());
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing. Otherwise force per-block splitting.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;
  unsigned NumCands = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  if (BestCand == NoCand)
    return 0;

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}
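
// Example: with uses at slots {A, B, C} there are two gaps, A-B and B-C. An
// interfering segment that overlaps the instruction at B raises both
// GapWeight[0] and GapWeight[1] to at least the segment's weight, because
// interference overlapping an instruction is charged to both adjacent gaps.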

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.

  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
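
// Allocation strategy summary: selectOrSplit tries the cheapest remedies
// first - direct assignment, then eviction, then (after one deferral round)
// splitting, and finally spilling. Returning 0 while filling NewVRegs tells
// the base class to allocate those new ranges instead of VirtReg.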

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}