//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    RS_New,    ///< Never seen before.
    RS_First,  ///< First time in the queue.
    RS_Second, ///< Second time in the queue.
    RS_Global, ///< Produced by global splitting.
    RS_Local,  ///< Produced by local splitting.
    RS_Spill   ///< Produced by spilling.
  };

  static const char *const StageName[];
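
  // Illustrative walkthrough (not from the original source): a virtual
  // register that never finds a free physreg typically moves through the
  // stages like this:
  //
  //   RS_New -> RS_First   (first enqueue; eviction is attempted)
  //          -> RS_Second  (deferred; split against realistic interference)
  //          -> RS_Global  (remainder of a region split; may still allocate)
  //          -> RS_Spill   (spill products; never split or spilled again)
  //
  // Stage transitions only move forward, which is what makes termination of
  // the splitting algorithm easy to argue.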

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
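
  // The comparison above is lexicographic: broken hints dominate, and spill
  // weight only breaks ties. For example (illustrative values, not from the
  // original source), {BrokenHints=0, MaxWeight=5.0} is cheaper than
  // {BrokenHints=1, MaxWeight=0.1}, so an eviction that preserves all hints
  // is preferred even when it evicts a heavier live range.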

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    InterferenceCache::Cursor Intf;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
  "RS_New",
  "RS_First",
  "RS_Second",
  "RS_Global",
  "RS_Local",
  "RS_Spill"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
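
// For instance (illustrative numbers): if the current best split cost is
// 10.0, a competing candidate must cost less than 10.0 to win, and when it
// does, the recorded best becomes 0.98 * cost. A later candidate must then
// beat that slightly smaller number, so candidates within roughly 2% of each
// other do not flip-flop due to floating point noise.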

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. Ensure that the new register gets
  // the same stage as the parent.
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_First;

  if (ExtraRegInfo[Reg].Stage == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that
    // don't fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}
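
// A sketch of the resulting ordering (illustrative sizes, not from the
// original source):
//
//   RS_First,  Size 400, hinted:   Prio = (1u<<31) + 400 + (1u<<30)  (first)
//   RS_First,  Size 400, no hint:  Prio = (1u<<31) + 400
//   RS_First,  Size  40, no hint:  Prio = (1u<<31) +  40
//   RS_Second, Size 400:           Prio = (1u<<31) - 400             (last)
//
// Bit 31 keeps every fresh range ahead of every deferred RS_Second range,
// and within RS_Second the shortest ranges come out first because Size is
// subtracted.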

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) <= RS_Second;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}
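
// In other words (illustrative summary): when A is headed for its hint and B
// can still be split, A wins even if B is heavier; otherwise plain spill
// weight decides, so eviction only happens when A is strictly more expensive
// to spill than B.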

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted at a combined cost below MaxCost.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Spill)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}
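
// Cascade walkthrough (illustrative): suppose %A evicts %B. %A is assigned
// cascade number 1 and stamps it on %B. If %B later wants this physreg back,
// its own cascade (1) is not greater than %A's (1), so the eviction is
// denied and the A-evicts-B / B-evicts-A loop cannot form. A fresh register
// with cascade 0 can still evict %B, because it would be assigned
// NextCascade, which is greater than 1 by then.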

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastUse)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstUse)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
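
// Summary of the live-in mapping above (illustrative): interference that
// reaches all the way to the block entry forces MustSpill; interference that
// begins before the block's first use merely prefers a spill on entry
// (PrefSpill), since the value can be reloaded ahead of the first use;
// interference appearing only between the first and last use costs a spill
// instruction but leaves the entry preference alone. The live-out cases
// mirror this around the last split point.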

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;
    addThroughConstraints(Cand.Intf,
                          ArrayRef<unsigned>(ActiveBlocks).slice(AddedTo));
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
      if (I->end < Stop)
        Cost += SpillPlacer->getBlockFrequency(Number);
    }
  }
  return Cost;
}
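
// Illustrative cost tally (numbers invented for the example): a live range
// used in an entry block (frequency 1.0), a loop block (frequency 10.0), and
// an exit block (frequency 1.0) would cost 1 + 10 + 1 = 12 units to isolate,
// plus another 10 if the loop block also redefines the value. tryRegionSplit
// only pursues a region split whose estimated cost beats this baseline.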

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor &Intf = Cand.Intf;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn = BI.LiveIn &&
                 LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = BI.LiveOut &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (!BI.isOneInstr()) {
        SE->splitSingleBlock(BI);
        SE->selectIntv(MainIntv);
      }
      continue;
    }

    Intf.moveToBlock(BI.MBB->getNumber());

    if (RegIn && RegOut)
      SE->splitLiveThroughBlock(BI.MBB->getNumber(),
                                MainIntv, Intf.first(),
                                MainIntv, Intf.last());
    else if (RegIn)
      SE->splitRegInBlock(BI, MainIntv, Intf.first());
    else
      SE->splitRegOutBlock(BI, MainIntv, Intf.last());
  }

  // Handle live-through blocks.
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    Intf.moveToBlock(Number);
    SE->splitLiveThroughBlock(Number, RegIn ? MainIntv : 0, Intf.first(),
                                      RegOut ? MainIntv : 0, Intf.last());
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Global);
      continue;
    }

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Global);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;
  unsigned NumCands = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  if (BestCand == NoCand)
    return 0;

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Local. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() >= RS_Local. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Local,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
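  //
  // Worked example (illustrative): a 3-instruction range is first split into
  // a 2-instruction range and a 3-instruction range (the extra COPY keeps the
  // totals honest). The 3-instruction result did not shrink, so rule 3 marks
  // it RS_Local; if it comes back, rule 2 demands that every piece of the
  // next split be strictly smaller, so the 3 -> 2+3 trick cannot repeat
  // forever.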

  bool ProgressRequired = getStage(VirtReg) >= RS_Local;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
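        // Illustrative: normalizeSpillWeight is roughly use frequency over
        // size, so with blockFreq = 10 and NewGaps = 2 the numerator is
        // 10 * 3, divided by the slot distance between the end uses plus one
        // instruction distance per live-in/live-out boundary. Denser
        // intervals get a larger EstWeight and can therefore pay for heavier
        // interference.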
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Local so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Local);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)
    return 0;

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Second ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    setStage(VirtReg, RS_Second);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}