1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "LiveIntervalUnion.h"
18 #include "LiveRangeEdit.h"
19 #include "RegAllocBase.h"
21 #include "SpillPlacement.h"
23 #include "VirtRegMap.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Function.h"
27 #include "llvm/PassAnalysisSupport.h"
28 #include "llvm/CodeGen/CalcSpillWeights.h"
29 #include "llvm/CodeGen/EdgeBundles.h"
30 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
31 #include "llvm/CodeGen/LiveStackAnalysis.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineLoopInfo.h"
35 #include "llvm/CodeGen/MachineLoopRanges.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/RegAllocRegistry.h"
39 #include "llvm/CodeGen/RegisterCoalescer.h"
40 #include "llvm/Target/TargetOptions.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Support/Timer.h"
50 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
51 STATISTIC(NumLocalSplits, "Number of split local live ranges");
52 STATISTIC(NumReassigned, "Number of interferences reassigned");
53 STATISTIC(NumEvicted, "Number of interferences evicted");
55 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
56 createGreedyRegisterAllocator);
/// RAGreedy - Greedy global register allocator. Live ranges are prioritized
/// by size, then assigned, reassigned, evicted, or split until everything has
/// a physical register or a stack slot.
/// NOTE(review): this listing has elided lines; several scopes below appear
/// unclosed (e.g. the stage enum and some inline method bodies) — compare
/// against the upstream file before editing.
class RAGreedy : public MachineFunctionPass, public RegAllocBase {
  // Registers the target has reserved; these are never allocatable.
  BitVector ReservedRegs;

  // Analyses cached for the current function.
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  SpillPlacement *SpillPlacer;

  // State.
  std::auto_ptr<Spiller> SpillerInstance;  // Owned spiller implementation.
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;  // (prio, vreg).

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
    RS_Original, ///< Never seen before, never split.
    RS_Second, ///< Second time in the queue.
    RS_Region, ///< Produced by region splitting.
    RS_Block, ///< Produced by per-block splitting.
    RS_Local, ///< Produced by local splitting.
    RS_Spill ///< Produced by spilling.

  // Stage of each virtual register, indexed by vreg number. Defaults to
  // RS_Original (see the constructor's LRStage(RS_Original) initializer).
  IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;

  // Current stage of VirtReg (stored as unsigned char to keep the map small).
  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return LiveRangeStage(LRStage[VirtReg.reg]);

  // Mark every live range in [Begin, End) as being in stage NewStage.
  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    LRStage.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin)
      LRStage[(*Begin)->reg] = NewStage;

  // Splitting state, valid while splitting the current live range.
  std::auto_ptr<SplitAnalysis> SA;

  /// All basic blocks where the current register is live.
  SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;

  /// For every instruction in SA->UseSlots, store the previous non-copy
  /// instruction's slot index (filled in by calcPrevSlots()).
  SmallVector<SlotIndex, 8> PrevSlot;

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  // Interference checking and splitting helpers.
  bool checkUncachedInterference(LiveInterval&, unsigned);
  LiveInterval *getSingleInterference(LiveInterval&, unsigned);
  bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
  float calcInterferenceInfo(LiveInterval&, unsigned);
  float calcGlobalSplitCost(const BitVector&);
  void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  SlotIndex getPrevMappedIndex(const MachineInstr*);
  void calcPrevSlots();
  unsigned nextSplitPoint(unsigned);
  bool canEvictInterference(LiveInterval&, unsigned, unsigned, float&);

  // Allocation strategies, tried in turn for each live range.
  unsigned tryReassign(LiveInterval&, AllocationOrder&,
                       SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
} // end anonymous namespace
char RAGreedy::ID = 0;

/// Factory hooked into the RegisterRegAlloc registry above; the caller takes
/// ownership of the returned pass.
FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
177 RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_Original) {
178 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
179 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
180 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
181 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
182 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
183 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
184 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
185 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
186 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
187 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
188 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
189 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
190 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
/// getAnalysisUsage - Declare the analyses this allocator requires and which
/// ones it preserves. The CFG itself is never modified.
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredID(StrongPHIEliminationID);
  // The coalescer must run before allocation and remain available to us.
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop per-function state (the spiller and the base class's
/// allocation state) between runs.
void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  RegAllocBase::releaseMemory();
/// enqueue - Add LI to the priority queue of unassigned live ranges.
void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  // NOTE(review): the declaration of Prio (and presumably an 'else' between
  // the two assignments below) is elided from this listing — confirm upstream.
  if (LRStage[Reg] == RS_Original)
    // 1st generation ranges are handled first, long -> short.
    Prio = (1u << 31) + Size;
    // Repeat offenders are handled second, short -> long.
    Prio = (1u << 30) - Size;
  // Boost ranges that have a physical register hint.
  const unsigned Hint = VRM->getRegAllocPref(Reg);
  if (TargetRegisterInfo::isPhysicalRegister(Hint))
  Queue.push(std::make_pair(Prio, Reg));
/// dequeue - Pop the next live range to allocate from the priority queue.
LiveInterval *RAGreedy::dequeue() {
  // NOTE(review): Queue.top() requires a non-empty queue; the emptiness check
  // and the matching Queue.pop() appear to be elided from this listing.
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
258 //===----------------------------------------------------------------------===//
259 // Register Reassignment
260 //===----------------------------------------------------------------------===//
// Check interference without using the cache: query PhysReg and each of its
// aliases directly against the live interval unions.
bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
  // NOTE(review): the second parameter line (unsigned PhysReg) is elided here.
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    // Build a fresh query instead of reusing the cached one.
    LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
    if (subQ.checkInterference())
/// getSingleInterference - Return the single interfering virtual register
/// assigned to PhysReg. Return 0 if more than one virtual register is
/// interfering.
LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
  // Check physreg and aliases.
  LiveInterval *Interference = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    if (Q.checkInterference()) {
      // Collecting at most 2 vregs is enough to detect "more than one".
      if (Q.collectInterferingVRegs(2) > 1)
      Interference = Q.interferingVRegs().front();
// Attempt to reassign this virtual register to a different physical register.
//
// FIXME: we are not yet caching these "second-level" interferences discovered
// in the sub-queries. These interferences can change with each call to
// selectOrSplit. However, we could implement a "may-interfere" cache that
// could be conservatively dirtied when we reassign or split.
//
// FIXME: This may result in a lot of alias queries. We could summarize alias
// live intervals in their parent register's live union, but it's messy.
bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
                            unsigned WantedPhysReg) {
  assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
         "Can only reassign virtual registers");
  assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
         "inconsistent phys reg assigment");
  // Walk the allocation order looking for an interference-free home.
  AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
  while (unsigned PhysReg = Order.next()) {
    // Don't reassign to a WantedPhysReg alias.
    if (TRI->regsOverlap(PhysReg, WantedPhysReg))
    if (checkUncachedInterference(InterferingVReg, PhysReg))
    // Reassign the interfering virtual reg to this physical reg.
    unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
    DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
          TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
    unassign(InterferingVReg, OldAssign);
    assign(InterferingVReg, PhysReg);
/// tryReassign - Try to reassign a single interference to a different physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryReassign(LiveInterval &VirtReg, AllocationOrder &Order,
                               SmallVectorImpl<LiveInterval*> &NewVRegs){
  NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);
  while (unsigned PhysReg = Order.next()) {
    // Only a single interfering vreg can be moved out of the way.
    LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
    if (!InterferingVReg)
    // Fixed (physreg) interference cannot be reassigned.
    if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
    if (reassignVReg(*InterferingVReg, PhysReg))
352 //===----------------------------------------------------------------------===//
353 // Interference eviction
354 //===----------------------------------------------------------------------===//
/// canEvict - Return true if all interferences between VirtReg and PhysReg can
/// be evicted. Set maxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    unsigned Size, float &MaxWeight) {
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is smaller.
    if (Q.collectInterferingVRegs(10) >= 10)
    // Check if any interfering live range is shorter than VirtReg.
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      // Physreg interference can never be evicted.
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
      if (Intf->getSize() <= Size)
      Weight = std::max(Weight, Intf->weight);
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs){
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
  // We can only evict interference if all interfering registers are virtual and
  // longer than VirtReg.
  const unsigned Size = VirtReg.getSize();
  // Keep track of the lightest single interference seen so far.
  float BestWeight = 0;
  unsigned BestPhys = 0;
  // Pick the candidate whose heaviest interference is lightest.
  while (unsigned PhysReg = Order.next()) {
    if (!canEvictInterference(VirtReg, PhysReg, Size, Weight))
    // This is an eviction candidate.
    DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
    if (BestPhys && Weight >= BestWeight)
    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
  // Evict every interfering vreg from BestPhys and requeue them.
  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
  for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interfererences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      NewVRegs.push_back(Intf);
436 //===----------------------------------------------------------------------===//
438 //===----------------------------------------------------------------------===//
/// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
/// when considering interference from PhysReg. Also compute an optimistic local
/// cost of this interference pattern.
///
/// The final cost of a split is the local cost + global cost of preferences
/// broken by SpillPlacement.
///
/// NOTE(review): this listing has elided lines (loop closers, 'continue'
/// statements, some declarations); structure below is as visible only.
float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
  // Reset interference dependent info.
  SpillConstraints.resize(SA->LiveBlocks.size());
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    BC.Number = BI.MBB->getNumber();
    // Blocks with uses want the register live on entry/exit; others don't care.
    BC.Entry = (BI.Uses && BI.LiveIn) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.Uses && BI.LiveOut) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BI.OverlapEntry = BI.OverlapExit = false;
  // Add interference info from each PhysReg alias.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(VirtReg, *AI).checkInterference())
    LiveIntervalUnion::SegmentIter IntI =
      PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
    // Determine which blocks have interference live in or after the last split
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
      // Is the interference live-in?
      IntI.advanceTo(Start);
      if (IntI.start() <= Start)
        BC.Entry = SpillPlacement::MustSpill;
      // Is the interference overlapping the last split point?
      if (IntI.stop() < BI.LastSplitPoint)
        IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
      if (IntI.start() < Stop)
        BC.Exit = SpillPlacement::MustSpill;
    // Rewind iterator and check other interferences.
    IntI.find(VirtReg.beginIndex());
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
      // Handle transparent blocks with interference separately.
      // Transparent blocks never incur any fixed cost.
      if (BI.LiveThrough && !BI.Uses) {
        IntI.advanceTo(Start);
        if (IntI.start() >= Stop)
        // Interference in a transparent block downgrades both ends to
        // prefer-spill unless they are already must-spill.
        if (BC.Entry != SpillPlacement::MustSpill)
          BC.Entry = SpillPlacement::PrefSpill;
        if (BC.Exit != SpillPlacement::MustSpill)
          BC.Exit = SpillPlacement::PrefSpill;
      // Now we only have blocks with uses left.
      // Check if the interference overlaps the uses.
      assert(BI.Uses && "Non-transparent block without any uses");
      // Check interference on entry.
      if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
        IntI.advanceTo(Start);
        // Not live in, but before the first use.
        if (IntI.start() < BI.FirstUse) {
          BC.Entry = SpillPlacement::PrefSpill;
          // If the block contains a kill from an earlier split, never split
          // again in the same block.
          if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Kill))
            BC.Entry = SpillPlacement::MustSpill;
      // Does interference overlap the uses in the entry segment
      if (BI.LiveIn && !BI.OverlapEntry) {
        IntI.advanceTo(BI.FirstUse);
        // A live-through interval has no kill.
        // Check [FirstUse;LastUse) instead.
        if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
          BI.OverlapEntry = true;
      // Does interference overlap the uses in the exit segment [Def;LastUse)?
      if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
        IntI.advanceTo(BI.Def);
        if (IntI.start() < BI.LastUse)
          BI.OverlapExit = true;
      // Check interference on exit.
      if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
        // Check interference between LastUse and Stop.
        if (BC.Exit != SpillPlacement::PrefSpill) {
          IntI.advanceTo(BI.LastUse);
          if (IntI.start() < Stop) {
            BC.Exit = SpillPlacement::PrefSpill;
            // Avoid splitting twice in the same block.
            if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Def))
              BC.Exit = SpillPlacement::MustSpill;
  // Accumulate a local cost of this interference pattern.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    unsigned Inserts = 0;
    // Do we need spill code for the entry segment?
    Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;
    // For the exit segment?
    Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;
    // The local cost of spill code in this block is the block frequency times
    // the number of spill instructions inserted.
    LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
  DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
               << LocalCost << '\n');
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SpillConstraints.
float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
  float GlobalCost = 0;
  for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    unsigned Inserts = 0;
    // Broken entry preference?
    Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
               (BC.Entry == SpillPlacement::PrefReg);
    // Broken exit preference?
    Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
               (BC.Exit == SpillPlacement::PrefReg);
    // Each broken preference is weighted by the block frequency.
    // NOTE(review): the 'GlobalCost +=' prefix of this statement appears to be
    // on a line elided from this listing — confirm upstream.
    Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
  DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
/// NOTE(review): this listing has elided lines ('continue', 'else', loop and
/// scope closers, some DEBUG wrappers); structure below is as visible only.
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
                                 const BitVector &LiveBundles,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
  for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
    dbgs() << " EB#" << i;

  // First compute interference ranges in the live blocks.
  typedef std::pair<SlotIndex, SlotIndex> IndexPair;
  SmallVector<IndexPair, 8> InterferenceRanges;
  InterferenceRanges.resize(SA->LiveBlocks.size());
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(VirtReg, *AI).checkInterference())
    LiveIntervalUnion::SegmentIter IntI =
      PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      IndexPair &IP = InterferenceRanges[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
      // First interference in block.
      IntI.advanceTo(Start);
      if (IntI.start() >= Stop)
      if (!IP.first.isValid() || IntI.start() < IP.first)
        IP.first = IntI.start();
      // Last interference in block.
      IntI.advanceTo(Stop);
      if (!IntI.valid() || IntI.start() >= Stop)
      if (IntI.stop() <= Start)
      if (!IP.second.isValid() || IntI.stop() > IP.second)
        IP.second = IntI.stop();

  // The split editor creates the new 'register' and 'stack' intervals.
  SmallVector<LiveInterval*, 4> SpillRegs;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
  SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);

  // Create the main cross-block interval.
  // First add all defs that are live out of a block.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
    // Should the register be live out?
    if (!BI.LiveOut || !RegOut)
    IndexPair &IP = InterferenceRanges[i];
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " intf [" << IP.first << ';' << IP.second << ')');
    // The interference interval should either be invalid or overlap MBB.
    assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
    assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");
    // Check interference leaving the block.
    if (!IP.second.isValid()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      assert(BI.LiveThrough && "No uses, but not live through block?");
      // Block is live-through without interference.
      DEBUG(dbgs() << ", no uses"
                   << (RegIn ? ", live-through.\n" : ", stack in.\n"));
      SE.enterIntvAtEnd(*BI.MBB);
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", not live-through.\n");
        SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
      // Block is live-through, but entry bundle is on the stack.
      // Reload just before the first use.
      DEBUG(dbgs() << ", not live-in, enter before first use.\n");
      SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
      DEBUG(dbgs() << ", live-through.\n");
    // Block has interference.
    DEBUG(dbgs() << ", interference to " << IP.second);
    if (!BI.LiveThrough && IP.second <= BI.Def) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
      SE.useIntv(BI.Def, Stop);
    // No uses in block, avoid interference by reloading as late as possible.
    DEBUG(dbgs() << ", no uses.\n");
    SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= IP.second && "Couldn't avoid interference");
    if (IP.second.getBoundaryIndex() < BI.LastUse) {
      // There are interference-free uses at the end of the block.
      // Find the first use that can get the live-out register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         IP.second.getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
      if (Use.getBaseIndex() <= BI.LastSplitPoint) {
        DEBUG(dbgs() << ", free use at " << Use << ".\n");
        SlotIndex SegStart = SE.enterIntvBefore(Use);
        assert(SegStart >= IP.second && "Couldn't avoid interference");
        assert(SegStart < BI.LastSplitPoint && "Impossible split point");
        SE.useIntv(SegStart, Stop);
    // Interference is after the last use.
    DEBUG(dbgs() << " after last use.\n");
    SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= IP.second && "Couldn't avoid interference");

  // Now all defs leading to live bundles are handled, do everything else.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
    // Is the register live-in?
    if (!BI.LiveIn || !RegIn)
    // We have an incoming register. Check for interference.
    IndexPair &IP = InterferenceRanges[i];
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << " -> BB#" << BI.MBB->getNumber());
    // Check interference entering the block.
    if (!IP.first.isValid()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      assert(BI.LiveThrough && "No uses, but not live through block?");
      // Block is live-through without interference.
      DEBUG(dbgs() << ", no uses, live-through.\n");
      SE.useIntv(Start, Stop);
      DEBUG(dbgs() << ", no uses, stack-out.\n");
      SE.leaveIntvAtTop(*BI.MBB);
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", killed in block.\n");
        SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
      // Block is live-through, but exit bundle is on the stack.
      // Spill immediately after the last use.
      if (BI.LastUse < BI.LastSplitPoint) {
        DEBUG(dbgs() << ", uses, stack-out.\n");
        SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
      // The last use is after the last split point, it is probably an
      DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
                   << BI.LastSplitPoint << ", stack-out.\n");
      SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
      SE.useIntv(Start, SegEnd);
      // Run a double interval from the split to the last use.
      // This makes it possible to spill the complement without affecting the
      SE.overlapIntv(SegEnd, BI.LastUse);
      // Register is live-through.
      DEBUG(dbgs() << ", uses, live-through.\n");
      SE.useIntv(Start, Stop);
    // Block has interference.
    DEBUG(dbgs() << ", interference from " << IP.first);
    if (!BI.LiveThrough && IP.first >= BI.Kill) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
      SE.useIntv(Start, BI.Kill);
    // No uses in block, avoid interference by spilling as soon as possible.
    DEBUG(dbgs() << ", no uses.\n");
    SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= IP.first && "Couldn't avoid interference");
    if (IP.first.getBaseIndex() > BI.FirstUse) {
      // There are interference-free uses at the beginning of the block.
      // Find the last use that can get the register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         IP.first.getBaseIndex());
      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
      SlotIndex Use = (--UI)->getBoundaryIndex();
      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
      SlotIndex SegEnd = SE.leaveIntvAfter(Use);
      assert(SegEnd <= IP.first && "Couldn't avoid interference");
      SE.useIntv(Start, SegEnd);
    // Interference is before the first use.
    DEBUG(dbgs() << " before first use.\n");
    SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= IP.first && "Couldn't avoid interference");

  // FIXME: Should we be more aggressive about splitting the stack region into
  // per-block segments? The current approach allows the stack region to
  // separate into connected components. Some components may be allocatable.
  MF->verify(this, "After splitting live range around region");

  // Make sure that at least one of the new intervals can allocate to PhysReg.
  // That was the whole point of splitting the live range.
  for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
    if (!checkUncachedInterference(**I, PhysReg)) {
  assert(found && "No allocatable intervals after pointless splitting");
/// tryRegionSplit - For each candidate physreg, compute the combined local +
/// global spill-placement cost, keep the cheapest candidate, and split VirtReg
/// around that candidate's region.
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  BitVector LiveBundles, BestBundles;
  unsigned BestReg = 0;
  while (unsigned PhysReg = Order.next()) {
    float Cost = calcInterferenceInfo(VirtReg, PhysReg);
    // Prune candidates that already exceed the best local cost seen.
    if (BestReg && Cost >= BestCost)
    SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
    // No live bundles, defer to splitSingleBlocks().
    if (!LiveBundles.any())
    Cost += calcGlobalSplitCost(LiveBundles);
    if (!BestReg || Cost < BestCost) {
      BestBundles.swap(LiveBundles);
  splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
  // New ranges skip straight to the region-split stage when requeued.
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
965 //===----------------------------------------------------------------------===//
967 //===----------------------------------------------------------------------===//
/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
          .checkInterference())
    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1023 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
1024 /// before MI that has a slot index. If MI is the first mapped instruction in
1025 /// its block, return the block start index instead.
///
/// NOTE(review): the backward-scan loop header over [B, I) is elided from
/// this listing; only its body test is visible below.
1027 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
1028 assert(MI && "Missing MachineInstr");
1029 const MachineBasicBlock *MBB = MI->getParent();
// Walk backwards from MI toward the block start (iteration elided),
// skipping DBG_VALUEs and copies, which don't count as "real" instructions.
1030 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
1032 if (!(--I)->isDebugValue() && !I->isCopy())
1033 return Indexes->getInstructionIndex(I);
// Fell off the front of the block: use the MBB start index.
1034 return Indexes->getMBBStartIdx(MBB);
1037 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
1038 /// real non-copy instruction for each instruction in SA->UseSlots.
///
/// NOTE(review): elided listing — presumably PrevSlot is cleared before the
/// reserve (original line 1042 is missing); verify against the full source.
1040 void RAGreedy::calcPrevSlots() {
1041 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1043 PrevSlot.reserve(Uses.size());
// PrevSlot[i] mirrors Uses[i]: the def slot of the nearest preceding
// mapped, non-copy instruction (or the block start).
1044 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
1045 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
1046 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
1050 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
1051 /// be beneficial to split before UseSlots[i].
1053 /// 0 is always a valid split point
///
/// NOTE(review): the loop body (`;`) and the trailing `return i;` are elided
/// from this listing.
1054 unsigned RAGreedy::nextSplitPoint(unsigned i) {
1055 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1056 const unsigned Size = Uses.size();
1057 assert(i != Size && "No split points after the end");
1058 // Allow split before i when Uses[i] is not adjacent to the previous use.
// Adjacency test: PrevSlot[i] (nearest real instruction before Uses[i])
// not later than the previous use means the two uses are back-to-back.
1059 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
1064 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block, searching for the contiguous range of uses
/// [BestBefore, BestAfter] that maximizes the margin between the estimated
/// new spill weight and the interference that would need to be evicted.
///
/// NOTE(review): elided listing — the BestDiff declaration, several
/// break/continue statements, and closing braces are not visible here.
1067 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1068 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1069 assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
1070 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
1072 // Note that it is possible to have an interval that is live-in or live-out
1073 // while only covering a single block - A phi-def can use undef values from
1074 // predecessors, and the block could be a single-block loop.
1075 // We don't bother doing anything clever about such a case, we simply assume
1076 // that the interval is continuous from FirstUse to LastUse. We should make
1077 // sure that we don't do anything illegal to such an interval, though.
// With two or fewer uses there is nothing useful to split off.
1079 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1080 if (Uses.size() <= 2)
1082 const unsigned NumGaps = Uses.size()-1;
1085 dbgs() << "tryLocalSplit: ";
1086 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1087 dbgs() << ' ' << SA->UseSlots[i];
1091 // For every use, find the previous mapped non-copy instruction.
1092 // We use this to detect valid split points, and to estimate new interval
// sizes (presumably via calcPrevSlots(), whose call is elided — verify).
// BestBefore == NumGaps encodes "no candidate found yet".
1096 unsigned BestBefore = NumGaps;
1097 unsigned BestAfter = 0;
1100 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB);
1101 SmallVector<float, 8> GapWeight;
1104 while (unsigned PhysReg = Order.next()) {
1105 // Keep track of the largest spill weight that would need to be evicted in
1106 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1107 calcGapWeights(PhysReg, GapWeight);
1109 // Try to find the best sequence of gaps to close.
1110 // The new spill weight must be larger than any gap interference.
1112 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1113 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1115 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1116 // It is the spill weight that needs to be evicted.
1117 float MaxGap = GapWeight[0];
1118 for (unsigned i = 1; i != SplitAfter; ++i)
1119 MaxGap = std::max(MaxGap, GapWeight[i]);
1122 // Live before/after split?
1123 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1124 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1126 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1127 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1128 << " i=" << MaxGap);
1130 // Stop before the interval gets so big we wouldn't be making progress.
1131 if (!LiveBefore && !LiveAfter) {
1132 DEBUG(dbgs() << " all\n");
1135 // Should the interval be extended or shrunk?
// MaxGap == HUGE_VALF marks a gap blocked by unevictable interference,
// so only finite gaps are worth scoring.
1137 if (MaxGap < HUGE_VALF) {
1138 // Estimate the new spill weight.
1140 // Each instruction reads and writes the register, except the first
1141 // instr doesn't read when !FirstLive, and the last instr doesn't write
1144 // We will be inserting copies before and after, so the total number of
1145 // reads and writes is 2 * EstUses.
1147 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1148 2*(LiveBefore + LiveAfter);
1150 // Try to guess the size of the new interval. This should be trivial,
1151 // but the slot index of an inserted copy can be a lot smaller than the
1152 // instruction it is inserted before if there are many dead indexes
1155 // We measure the distance from the instruction before SplitBefore to
1156 // get a conservative estimate.
1158 // The final distance can still be different if inserting copies
1159 // triggers a slot index renumbering.
1161 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1162 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1163 // Would this split be possible to allocate?
1164 // Never allocate all gaps, we wouldn't be making progress.
// Larger Diff = more headroom between the new weight and what must be
// evicted; the best (BestReg/BestDiff bookkeeping partly elided) wins.
1165 float Diff = EstWeight - MaxGap;
1166 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1169 if (Diff > BestDiff) {
1170 DEBUG(dbgs() << " (best)");
1172 BestBefore = SplitBefore;
1173 BestAfter = SplitAfter;
// Shrink the candidate range from the left by advancing SplitBefore to
// the next legal split point.
1180 SplitBefore = nextSplitPoint(SplitBefore);
1181 if (SplitBefore < SplitAfter) {
1182 DEBUG(dbgs() << " shrink\n");
1183 // Recompute the max when necessary.
// Only rescan when the gap just dropped could have been the maximum.
1184 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1185 MaxGap = GapWeight[SplitBefore];
1186 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1187 MaxGap = std::max(MaxGap, GapWeight[i]);
1194 // Try to extend the interval.
1195 if (SplitAfter >= NumGaps) {
1196 DEBUG(dbgs() << " end\n");
// Grow the candidate range to the right, folding in the new gaps' weights.
1200 DEBUG(dbgs() << " extend\n");
1201 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1202 SplitAfter != e; ++SplitAfter)
1203 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1208 // Didn't find any candidates?
1209 if (BestBefore == NumGaps)
1212 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1213 << '-' << Uses[BestAfter] << ", " << BestDiff
1214 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
// Materialize the chosen split: open a new interval covering
// [BestBefore, BestAfter] and leave the remainder in the parent.
1216 SmallVector<LiveInterval*, 4> SpillRegs;
1217 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1218 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
1221 SlotIndex SegStart = SE.enterIntvBefore(Uses[BestBefore]);
1222 SlotIndex SegStop = SE.leaveIntvAfter(Uses[BestAfter]);
1223 SE.useIntv(SegStart, SegStop);
// Mark the new registers as local-split products to limit re-splitting.
1226 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
1232 //===----------------------------------------------------------------------===//
1233 // Live Range Splitting
1234 //===----------------------------------------------------------------------===//
1236 /// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
1238 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
///
/// NOTE(review): elided listing — the early `return 0;` statements and
/// closing braces for the staged branches are not visible here.
1239 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1240 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1241 // Local intervals are handled separately.
1242 if (LIS->intervalIsInOneMBB(VirtReg)) {
1243 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1244 SA->analyze(&VirtReg);
1245 return tryLocalSplit(VirtReg, Order, NewVRegs);
1248 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1250 // Don't iterate global splitting.
1251 // Move straight to spilling if this range was produced by a global split.
1252 LiveRangeStage Stage = getStage(VirtReg);
1253 if (Stage >= RS_Block)
1256 SA->analyze(&VirtReg);
1258 // First try to split around a region spanning multiple blocks.
// Stage gating ensures each splitting technique is applied at most once
// per live range lineage (RS_Region before RS_Block).
1259 if (Stage < RS_Region) {
1260 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1261 if (PhysReg || !NewVRegs.empty())
1265 // Then isolate blocks with multiple uses.
1266 if (Stage < RS_Block) {
1267 SplitAnalysis::BlockPtrSet Blocks;
1268 if (SA->getMultiUseBlocks(Blocks)) {
1269 SmallVector<LiveInterval*, 4> SpillRegs;
1270 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1271 SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
// Stamp the products so a re-queued range skips straight past splitting.
1272 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Block);
// Expensive machine verification; presumably guarded by a VerifyEnabled
// flag on an elided line — confirm against the full source.
1274 MF->verify(this, "After splitting live range around basic blocks");
1278 // Don't assign any physregs.
1283 //===----------------------------------------------------------------------===//
//                            Main Entry Point
1285 //===----------------------------------------------------------------------===//
/// selectOrSplit - Main allocation driver for one virtual register: try a
/// free register, then reassignment, then eviction, then (on the second
/// visit) splitting, and finally spill VirtReg itself.
///
/// NOTE(review): elided listing — the `return PhysReg;`/`return 0;`
/// statements after each successful strategy are not visible here.
1287 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1288 SmallVectorImpl<LiveInterval*> &NewVRegs) {
// Remember the stage before promoting it: a first-time range is marked
// RS_Second so the next visit is allowed to split/spill.
1289 LiveRangeStage Stage = getStage(VirtReg);
1290 if (Stage == RS_Original)
1291 LRStage[VirtReg.reg] = RS_Second;
1293 // First try assigning a free register.
1294 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1295 while (unsigned PhysReg = Order.next()) {
1296 if (!checkPhysRegInterference(VirtReg, PhysReg))
// Next, try moving an interfering range elsewhere, then evicting one.
1300 if (unsigned PhysReg = tryReassign(VirtReg, Order, NewVRegs))
1303 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1306 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1308 // The first time we see a live range, don't try to split or spill.
1309 // Wait until the second time, when all smaller ranges have been allocated.
1310 // This gives a better picture of the interference to split around.
// Re-queue VirtReg unchanged; it comes back via NewVRegs at RS_Second.
1311 if (Stage == RS_Original) {
1312 NewVRegs.push_back(&VirtReg);
1316 assert(Stage < RS_Spill && "Cannot allocate after spilling");
1318 // Try splitting VirtReg or interferences.
1319 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1320 if (PhysReg || !NewVRegs.empty())
1323 // Finally spill VirtReg itself.
1324 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1325 SmallVector<LiveInterval*, 1> pendingSpills;
1326 spiller().spill(&VirtReg, NewVRegs, pendingSpills);
1328 // The live virtual register requesting allocation was spilled, so tell
1329 // the caller not to allocate anything during this round.
1333 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1334 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1335 << "********** Function: "
1336 << ((Value*)mf.getFunction())->getName() << '\n');
1340 MF->verify(this, "Before greedy register allocator");
1342 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1343 Indexes = &getAnalysis<SlotIndexes>();
1344 DomTree = &getAnalysis<MachineDominatorTree>();
1345 ReservedRegs = TRI->getReservedRegs(*MF);
1346 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1347 Loops = &getAnalysis<MachineLoopInfo>();
1348 LoopRanges = &getAnalysis<MachineLoopRanges>();
1349 Bundles = &getAnalysis<EdgeBundles>();
1350 SpillPlacer = &getAnalysis<SpillPlacement>();
1352 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1354 LRStage.resize(MRI->getNumVirtRegs());
1358 LIS->addKillFlags();
1362 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1363 VRM->rewrite(Indexes);
1366 // The pass output is in VirtRegMap. Release all the transient data.