1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "LiveIntervalUnion.h"
18 #include "LiveRangeEdit.h"
19 #include "RegAllocBase.h"
21 #include "SpillPlacement.h"
23 #include "VirtRegMap.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Function.h"
27 #include "llvm/PassAnalysisSupport.h"
28 #include "llvm/CodeGen/CalcSpillWeights.h"
29 #include "llvm/CodeGen/EdgeBundles.h"
30 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
31 #include "llvm/CodeGen/LiveStackAnalysis.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineLoopInfo.h"
35 #include "llvm/CodeGen/MachineLoopRanges.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/RegAllocRegistry.h"
39 #include "llvm/CodeGen/RegisterCoalescer.h"
40 #include "llvm/Target/TargetOptions.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Support/Timer.h"
// Statistics reported under -stats: how often this allocator resolved
// interference by splitting, reassigning, or evicting live ranges.
50 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
51 STATISTIC(NumLocalSplits, "Number of split local live ranges");
52 STATISTIC(NumReassigned, "Number of interferences reassigned");
53 STATISTIC(NumEvicted, "Number of interferences evicted");
// Register this allocator with the command line under -regalloc=greedy.
55 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
56 createGreedyRegisterAllocator);
// Greedy register allocator pass. Live ranges are assigned one at a time in
// priority order; interference is resolved by reassignment, eviction,
// splitting, or spilling (see selectOrSplit below).
59 class RAGreedy : public MachineFunctionPass, public RegAllocBase {
// Registers reserved by the target for this function; never allocatable.
62 BitVector ReservedRegs;
// Analyses cached at the start of runOnMachineFunction.
67 MachineDominatorTree *DomTree;
68 MachineLoopInfo *Loops;
69 MachineLoopRanges *LoopRanges;
71 SpillPlacement *SpillPlacer;
// Owned helper objects (std::auto_ptr is the pre-C++11 owning pointer
// this codebase uses).
74 std::auto_ptr<Spiller> SpillerInstance;
75 std::auto_ptr<SplitAnalysis> SA;
// Priority queue of (priority, vreg number) pairs driving allocation order;
// see enqueue()/dequeue().
76 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
// Per-vreg count of how many times the register has been queued; enqueue()
// uses it to boost ranges seen for the first time.
77 IndexedMap<unsigned, VirtReg2IndexFunctor> Generation;
81 /// All basic blocks where the current register is live.
82 SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;
84 /// For every instruction in SA->UseSlots, store the previous non-copy
86 SmallVector<SlotIndex, 8> PrevSlot;
91 /// Return the pass name.
92 virtual const char* getPassName() const {
93 return "Greedy Register Allocator";
96 /// RAGreedy analysis usage.
97 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
98 virtual void releaseMemory();
99 virtual Spiller &spiller() { return *SpillerInstance; }
100 virtual void enqueue(LiveInterval *LI);
101 virtual LiveInterval *dequeue();
102 virtual unsigned selectOrSplit(LiveInterval&,
103 SmallVectorImpl<LiveInterval*>&);
105 /// Perform register allocation.
106 virtual bool runOnMachineFunction(MachineFunction &mf);
// Interference analysis helpers and the assignment strategies tried by
// selectOrSplit, roughly in order of increasing cost.
111 bool checkUncachedInterference(LiveInterval&, unsigned);
112 LiveInterval *getSingleInterference(LiveInterval&, unsigned);
113 bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
114 float calcInterferenceWeight(LiveInterval&, unsigned);
115 float calcInterferenceInfo(LiveInterval&, unsigned);
116 float calcGlobalSplitCost(const BitVector&);
117 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
118 SmallVectorImpl<LiveInterval*>&);
119 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
120 SlotIndex getPrevMappedIndex(const MachineInstr*);
121 void calcPrevSlots();
122 unsigned nextSplitPoint(unsigned);
123 bool canEvictInterference(LiveInterval&, unsigned, unsigned, float&);
125 unsigned tryReassign(LiveInterval&, AllocationOrder&,
126 SmallVectorImpl<LiveInterval*>&);
127 unsigned tryEvict(LiveInterval&, AllocationOrder&,
128 SmallVectorImpl<LiveInterval*>&);
129 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
130 SmallVectorImpl<LiveInterval*>&);
131 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
132 SmallVectorImpl<LiveInterval*>&);
133 unsigned trySplit(LiveInterval&, AllocationOrder&,
134 SmallVectorImpl<LiveInterval*>&);
135 unsigned trySpillInterferences(LiveInterval&, AllocationOrder&,
136 SmallVectorImpl<LiveInterval*>&);
138 } // end anonymous namespace
// Pass identification.
140 char RAGreedy::ID = 0;
// Factory used by the -regalloc=greedy registration above.
142 FunctionPass* llvm::createGreedyRegisterAllocator() {
143 return new RAGreedy();
// Constructor: make sure every analysis pass this allocator depends on
// (see getAnalysisUsage) is initialized in the pass registry.
// Fix: the original called initializeSlotIndexesPass twice; the redundant
// duplicate call has been removed.
146 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
147 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
148 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
150 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
151 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
152 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
153 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
154 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
155 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
156 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
157 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
158 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
159 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
// Declare the analyses this pass requires and preserves. The required set
// must stay in sync with the initialize* calls in the constructor.
162 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
163 AU.setPreservesCFG();
164 AU.addRequired<AliasAnalysis>();
165 AU.addPreserved<AliasAnalysis>();
166 AU.addRequired<LiveIntervals>();
167 AU.addRequired<SlotIndexes>();
168 AU.addPreserved<SlotIndexes>();
170 AU.addRequiredID(StrongPHIEliminationID);
171 AU.addRequiredTransitive<RegisterCoalescer>();
172 AU.addRequired<CalculateSpillWeights>();
173 AU.addRequired<LiveStacks>();
174 AU.addPreserved<LiveStacks>();
175 AU.addRequired<MachineDominatorTree>();
176 AU.addPreserved<MachineDominatorTree>();
177 AU.addRequired<MachineLoopInfo>();
178 AU.addPreserved<MachineLoopInfo>();
179 AU.addRequired<MachineLoopRanges>();
180 AU.addPreserved<MachineLoopRanges>();
181 AU.addRequired<VirtRegMap>();
182 AU.addPreserved<VirtRegMap>();
183 AU.addRequired<EdgeBundles>();
184 AU.addRequired<SpillPlacement>();
185 MachineFunctionPass::getAnalysisUsage(AU);
// Drop per-function state: destroy the spiller and let the base class
// release its own data structures.
188 void RAGreedy::releaseMemory() {
189 SpillerInstance.reset(0);
191 RegAllocBase::releaseMemory();
// Push LI onto the priority queue. Priority is derived from the range's
// size, with extra boosts applied below.
194 void RAGreedy::enqueue(LiveInterval *LI) {
195 // Prioritize live ranges by size, assigning larger ranges first.
196 // The queue holds (size, reg) pairs.
197 unsigned Size = LI->getSize();
198 unsigned Reg = LI->reg;
199 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
200 "Can only enqueue virtual registers");
202 // Boost ranges that have a physical register hint.
203 unsigned Hint = VRM->getRegAllocPref(Reg);
204 if (TargetRegisterInfo::isPhysicalRegister(Hint))
207 // Boost ranges that we see for the first time.
// Generation counts how many times each vreg has been queued.
208 Generation.grow(Reg);
209 if (++Generation[Reg] == 1)
212 Queue.push(std::make_pair(Size, Reg));
// Pop the highest-priority live interval from the queue.
215 LiveInterval *RAGreedy::dequeue() {
218 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
223 //===----------------------------------------------------------------------===//
224 //                         Register Reassignment
225 //===----------------------------------------------------------------------===//
227 // Check interference without using the cache.
// Builds a fresh LiveIntervalUnion query for PhysReg and each register
// that overlaps it, rather than reusing the cached queries.
228 bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
230 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
231 LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
232 if (subQ.checkInterference())
238 /// getSingleInterference - Return the single interfering virtual register
239 /// assigned to PhysReg. Return 0 if more than one virtual register is
/// interfering.
241 LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
243 // Check physreg and aliases.
244 LiveInterval *Interference = 0;
245 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
246 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
247 if (Q.checkInterference()) {
// Asking for at most 2 interferences is enough to detect "more than one".
250 if (Q.collectInterferingVRegs(2) > 1)
252 Interference = Q.interferingVRegs().front();
258 // Attempt to reassign this virtual register to a different physical register.
260 // FIXME: we are not yet caching these "second-level" interferences discovered
261 // in the sub-queries. These interferences can change with each call to
262 // selectOrSplit. However, we could implement a "may-interfere" cache that
263 // could be conservatively dirtied when we reassign or split.
265 // FIXME: This may result in a lot of alias queries. We could summarize alias
266 // live intervals in their parent register's live union, but it's messy.
267 bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
268 unsigned WantedPhysReg) {
269 assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
270 "Can only reassign virtual registers");
271 assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
272 "inconsistent phys reg assigment");
// Walk the allocation order of InterferingVReg looking for a free physreg
// that does not alias the register we want to free up.
274 AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
275 while (unsigned PhysReg = Order.next()) {
276 // Don't reassign to a WantedPhysReg alias.
277 if (TRI->regsOverlap(PhysReg, WantedPhysReg))
280 if (checkUncachedInterference(InterferingVReg, PhysReg))
283 // Reassign the interfering virtual reg to this physical reg.
284 unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
285 DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
286 TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
287 unassign(InterferingVReg, OldAssign);
288 assign(InterferingVReg, PhysReg);
295 /// tryReassign - Try to reassign a single interference to a different physreg.
296 /// @param VirtReg Currently unassigned virtual register.
297 /// @param Order   Physregs to try.
298 /// @return        Physreg to assign VirtReg, or 0.
299 unsigned RAGreedy::tryReassign(LiveInterval &VirtReg, AllocationOrder &Order,
300 SmallVectorImpl<LiveInterval*> &NewVRegs){
301 NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);
// Only physregs blocked by exactly one virtual interference are candidates;
// try to move that single interference elsewhere.
304 while (unsigned PhysReg = Order.next()) {
305 LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
306 if (!InterferingVReg)
// Fixed (physreg) interference cannot be reassigned.
308 if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
310 if (reassignVReg(*InterferingVReg, PhysReg))
317 //===----------------------------------------------------------------------===//
318 //                         Interference eviction
319 //===----------------------------------------------------------------------===//
321 /// canEvict - Return true if all interferences between VirtReg and PhysReg can
322 /// be evicted. Set maxWeight to the maximal spill weight of an interference.
323 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
324 unsigned Size, float &MaxWeight) {
326 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
327 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
328 // If there are 10 or more interferences, chances are one is smaller.
329 if (Q.collectInterferingVRegs(10) >= 10)
332 // Check if any interfering live range is shorter than VirtReg.
333 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
334 LiveInterval *Intf = Q.interferingVRegs()[i];
// Physreg interference can never be evicted.
335 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
337 if (Intf->getSize() <= Size)
339 Weight = std::max(Weight, Intf->weight);
346 /// tryEvict - Try to evict all interferences for a physreg.
347 /// @param VirtReg Currently unassigned virtual register.
348 /// @param Order   Physregs to try.
349 /// @return        Physreg to assign VirtReg, or 0.
350 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
351 AllocationOrder &Order,
352 SmallVectorImpl<LiveInterval*> &NewVRegs){
353 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
355 // We can only evict interference if all interfering registers are virtual and
356 // longer than VirtReg.
357 const unsigned Size = VirtReg.getSize();
359 // Keep track of the lightest single interference seen so far.
360 float BestWeight = 0;
361 unsigned BestPhys = 0;
364 while (unsigned PhysReg = Order.next()) {
366 if (!canEvictInterference(VirtReg, PhysReg, Size, Weight))
369 // This is an eviction candidate.
370 DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
// Keep the candidate whose heaviest interference is lightest.
372 if (BestPhys && Weight >= BestWeight)
// Evict every interference on BestPhys and all of its aliases, pushing the
// unassigned vregs back onto the work list.
383 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
384 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
385 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
386 assert(Q.seenAllInterferences() && "Didn't check all interfererences.");
387 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
388 LiveInterval *Intf = Q.interferingVRegs()[i];
389 unassign(*Intf, VRM->getPhys(Intf->reg));
391 NewVRegs.push_back(Intf);
398 //===----------------------------------------------------------------------===//
400 //===----------------------------------------------------------------------===//
402 /// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
403 /// when considering interference from PhysReg. Also compute an optimistic local
404 /// cost of this interference pattern.
406 /// The final cost of a split is the local cost + global cost of preferences
407 /// broken by SpillPlacement.
409 float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
410 // Reset interference dependent info.
// Start every block optimistically: prefer a register at entry/exit where
// the range is live and used, otherwise don't care.
411 SpillConstraints.resize(SA->LiveBlocks.size());
412 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
413 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
414 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
415 BC.Number = BI.MBB->getNumber();
416 BC.Entry = (BI.Uses && BI.LiveIn) ?
417 SpillPlacement::PrefReg : SpillPlacement::DontCare;
418 BC.Exit = (BI.Uses && BI.LiveOut) ?
419 SpillPlacement::PrefReg : SpillPlacement::DontCare;
420 BI.OverlapEntry = BI.OverlapExit = false;
423 // Add interference info from each PhysReg alias.
424 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
425 if (!query(VirtReg, *AI).checkInterference())
427 LiveIntervalUnion::SegmentIter IntI =
428 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
432 // Determine which blocks have interference live in or after the last split
434 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
435 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
436 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
437 SlotIndex Start, Stop;
438 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
440 // Skip interference-free blocks.
441 if (IntI.start() >= Stop)
444 // Is the interference live-in?
446 IntI.advanceTo(Start);
449 if (IntI.start() <= Start)
450 BC.Entry = SpillPlacement::MustSpill;
453 // Is the interference overlapping the last split point?
455 if (IntI.stop() < BI.LastSplitPoint)
456 IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
459 if (IntI.start() < Stop)
460 BC.Exit = SpillPlacement::MustSpill;
464 // Rewind iterator and check other interferences.
465 IntI.find(VirtReg.beginIndex());
466 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
467 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
468 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
469 SlotIndex Start, Stop;
470 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
472 // Skip interference-free blocks.
473 if (IntI.start() >= Stop)
476 // Handle transparent blocks with interference separately.
477 // Transparent blocks never incur any fixed cost.
478 if (BI.LiveThrough && !BI.Uses) {
479 IntI.advanceTo(Start);
482 if (IntI.start() >= Stop)
485 if (BC.Entry != SpillPlacement::MustSpill)
486 BC.Entry = SpillPlacement::PrefSpill;
487 if (BC.Exit != SpillPlacement::MustSpill)
488 BC.Exit = SpillPlacement::PrefSpill;
492 // Now we only have blocks with uses left.
493 // Check if the interference overlaps the uses.
494 assert(BI.Uses && "Non-transparent block without any uses");
496 // Check interference on entry.
497 if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
498 IntI.advanceTo(Start);
501 // Not live in, but before the first use.
502 if (IntI.start() < BI.FirstUse) {
503 BC.Entry = SpillPlacement::PrefSpill;
504 // If the block contains a kill from an earlier split, never split
505 // again in the same block.
506 if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Kill))
507 BC.Entry = SpillPlacement::MustSpill;
511 // Does interference overlap the uses in the entry segment
513 if (BI.LiveIn && !BI.OverlapEntry) {
514 IntI.advanceTo(BI.FirstUse);
517 // A live-through interval has no kill.
518 // Check [FirstUse;LastUse) instead.
519 if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
520 BI.OverlapEntry = true;
523 // Does interference overlap the uses in the exit segment [Def;LastUse)?
524 if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
525 IntI.advanceTo(BI.Def);
528 if (IntI.start() < BI.LastUse)
529 BI.OverlapExit = true;
532 // Check interference on exit.
533 if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
534 // Check interference between LastUse and Stop.
535 if (BC.Exit != SpillPlacement::PrefSpill) {
536 IntI.advanceTo(BI.LastUse);
539 if (IntI.start() < Stop) {
540 BC.Exit = SpillPlacement::PrefSpill;
541 // Avoid splitting twice in the same block.
542 if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Def))
543 BC.Exit = SpillPlacement::MustSpill;
550 // Accumulate a local cost of this interference pattern.
552 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
553 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
556 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
557 unsigned Inserts = 0;
559 // Do we need spill code for the entry segment?
561 Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;
563 // For the exit segment?
565 Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;
567 // The local cost of spill code in this block is the block frequency times
568 // the number of spill instructions inserted.
570 LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
572 DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
573 << LocalCost << '\n');
577 /// calcGlobalSplitCost - Return the global split cost of following the split
578 /// pattern in LiveBundles. This cost should be added to the local cost of the
579 /// interference pattern in SpillConstraints.
581 float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
582 float GlobalCost = 0;
583 for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
584 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
585 unsigned Inserts = 0;
586 // Broken entry preference?
// Bundle 0/1 are the ingoing/outgoing edge bundles of block BC.Number;
// a mismatch with the block's preference implies spill code on that edge.
587 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
588 (BC.Entry == SpillPlacement::PrefReg);
589 // Broken exit preference?
590 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
591 (BC.Exit == SpillPlacement::PrefReg);
594 Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
596 DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
600 /// splitAroundRegion - Split VirtReg around the region determined by
601 /// LiveBundles. Make an effort to avoid interference from PhysReg.
603 /// The 'register' interval is going to contain as many uses as possible while
604 /// avoiding interference. The 'stack' interval is the complement constructed by
605 /// SplitEditor. It will contain the rest.
607 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
608 const BitVector &LiveBundles,
609 SmallVectorImpl<LiveInterval*> &NewVRegs) {
611 dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
613 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
614 dbgs() << " EB#" << i;
618 // First compute interference ranges in the live blocks.
// For each live block, InterferenceRanges[i] records the first and last
// interfering slot from PhysReg (invalid indexes mean no interference).
619 typedef std::pair<SlotIndex, SlotIndex> IndexPair;
620 SmallVector<IndexPair, 8> InterferenceRanges;
621 InterferenceRanges.resize(SA->LiveBlocks.size());
622 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
623 if (!query(VirtReg, *AI).checkInterference())
625 LiveIntervalUnion::SegmentIter IntI =
626 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
629 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
630 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
631 IndexPair &IP = InterferenceRanges[i];
632 SlotIndex Start, Stop;
633 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
634 // Skip interference-free blocks.
635 if (IntI.start() >= Stop)
638 // First interference in block.
640 IntI.advanceTo(Start);
643 if (IntI.start() >= Stop)
645 if (!IP.first.isValid() || IntI.start() < IP.first)
646 IP.first = IntI.start();
649 // Last interference in block.
651 IntI.advanceTo(Stop);
652 if (!IntI.valid() || IntI.start() >= Stop)
654 if (IntI.stop() <= Start)
656 if (!IP.second.isValid() || IntI.stop() > IP.second)
657 IP.second = IntI.stop();
// Set up the split editor that will carve VirtReg into new intervals.
662 SmallVector<LiveInterval*, 4> SpillRegs;
663 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
664 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
666 // Create the main cross-block interval.
669 // First add all defs that are live out of a block.
670 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
671 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
672 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
673 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
675 // Should the register be live out?
676 if (!BI.LiveOut || !RegOut)
679 IndexPair &IP = InterferenceRanges[i];
680 SlotIndex Start, Stop;
681 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
683 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
684 << Bundles->getBundle(BI.MBB->getNumber(), 1)
685 << " intf [" << IP.first << ';' << IP.second << ')');
687 // The interference interval should either be invalid or overlap MBB.
688 assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
689 assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");
691 // Check interference leaving the block.
692 if (!IP.second.isValid()) {
693 // Block is interference-free.
694 DEBUG(dbgs() << ", no interference");
696 assert(BI.LiveThrough && "No uses, but not live through block?");
697 // Block is live-through without interference.
698 DEBUG(dbgs() << ", no uses"
699 << (RegIn ? ", live-through.\n" : ", stack in.\n"));
701 SE.enterIntvAtEnd(*BI.MBB);
704 if (!BI.LiveThrough) {
705 DEBUG(dbgs() << ", not live-through.\n");
706 SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
710 // Block is live-through, but entry bundle is on the stack.
711 // Reload just before the first use.
712 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
713 SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
716 DEBUG(dbgs() << ", live-through.\n");
720 // Block has interference.
721 DEBUG(dbgs() << ", interference to " << IP.second);
723 if (!BI.LiveThrough && IP.second <= BI.Def) {
724 // The interference doesn't reach the outgoing segment.
725 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
726 SE.useIntv(BI.Def, Stop);
732 // No uses in block, avoid interference by reloading as late as possible.
733 DEBUG(dbgs() << ", no uses.\n");
734 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
735 assert(SegStart >= IP.second && "Couldn't avoid interference");
739 if (IP.second.getBoundaryIndex() < BI.LastUse) {
740 // There are interference-free uses at the end of the block.
741 // Find the first use that can get the live-out register.
742 SmallVectorImpl<SlotIndex>::const_iterator UI =
743 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
744 IP.second.getBoundaryIndex());
745 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
747 assert(Use <= BI.LastUse && "Couldn't find last use");
748 // Only attempt a split before the last split point.
749 if (Use.getBaseIndex() <= BI.LastSplitPoint) {
750 DEBUG(dbgs() << ", free use at " << Use << ".\n");
751 SlotIndex SegStart = SE.enterIntvBefore(Use);
752 assert(SegStart >= IP.second && "Couldn't avoid interference");
753 assert(SegStart < BI.LastSplitPoint && "Impossible split point");
754 SE.useIntv(SegStart, Stop);
759 // Interference is after the last use.
760 DEBUG(dbgs() << " after last use.\n");
761 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
762 assert(SegStart >= IP.second && "Couldn't avoid interference");
765 // Now all defs leading to live bundles are handled, do everything else.
766 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
767 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
768 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
769 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
771 // Is the register live-in?
772 if (!BI.LiveIn || !RegIn)
775 // We have an incoming register. Check for interference.
776 IndexPair &IP = InterferenceRanges[i];
777 SlotIndex Start, Stop;
778 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
780 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
781 << " -> BB#" << BI.MBB->getNumber());
783 // Check interference entering the block.
784 if (!IP.first.isValid()) {
785 // Block is interference-free.
786 DEBUG(dbgs() << ", no interference");
788 assert(BI.LiveThrough && "No uses, but not live through block?");
789 // Block is live-through without interference.
791 DEBUG(dbgs() << ", no uses, live-through.\n");
792 SE.useIntv(Start, Stop);
794 DEBUG(dbgs() << ", no uses, stack-out.\n");
795 SE.leaveIntvAtTop(*BI.MBB);
799 if (!BI.LiveThrough) {
800 DEBUG(dbgs() << ", killed in block.\n");
801 SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
805 // Block is live-through, but exit bundle is on the stack.
806 // Spill immediately after the last use.
807 if (BI.LastUse < BI.LastSplitPoint) {
808 DEBUG(dbgs() << ", uses, stack-out.\n");
809 SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
812 // The last use is after the last split point, it is probably an
814 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
815 << BI.LastSplitPoint << ", stack-out.\n");
816 SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
817 SE.useIntv(Start, SegEnd);
818 // Run a double interval from the split to the last use.
819 // This makes it possible to spill the complement without affecting the
821 SE.overlapIntv(SegEnd, BI.LastUse);
824 // Register is live-through.
825 DEBUG(dbgs() << ", uses, live-through.\n");
826 SE.useIntv(Start, Stop);
830 // Block has interference.
831 DEBUG(dbgs() << ", interference from " << IP.first);
833 if (!BI.LiveThrough && IP.first >= BI.Kill) {
834 // The interference doesn't reach the outgoing segment.
835 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
836 SE.useIntv(Start, BI.Kill);
841 // No uses in block, avoid interference by spilling as soon as possible.
842 DEBUG(dbgs() << ", no uses.\n");
843 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
844 assert(SegEnd <= IP.first && "Couldn't avoid interference");
847 if (IP.first.getBaseIndex() > BI.FirstUse) {
848 // There are interference-free uses at the beginning of the block.
849 // Find the last use that can get the register.
850 SmallVectorImpl<SlotIndex>::const_iterator UI =
851 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
852 IP.first.getBaseIndex());
853 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
854 SlotIndex Use = (--UI)->getBoundaryIndex();
855 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
856 SlotIndex SegEnd = SE.leaveIntvAfter(Use);
857 assert(SegEnd <= IP.first && "Couldn't avoid interference");
858 SE.useIntv(Start, SegEnd);
862 // Interference is before the first use.
863 DEBUG(dbgs() << " before first use.\n");
864 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
865 assert(SegEnd <= IP.first && "Couldn't avoid interference");
870 // FIXME: Should we be more aggressive about splitting the stack region into
871 // per-block segments? The current approach allows the stack region to
872 // separate into connected components. Some components may be allocatable.
877 MF->verify(this, "After splitting live range around region");
880 // Make sure that at least one of the new intervals can allocate to PhysReg.
881 // That was the whole point of splitting the live range.
883 for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
885 if (!checkUncachedInterference(**I, PhysReg)) {
889 assert(found && "No allocatable intervals after pointless splitting");
// Try a region split: evaluate each candidate physreg's interference
// pattern with SpillPlacement, keep the cheapest, and split around it.
// Returns 0 (new vregs are pushed to NewVRegs instead of assigning here).
894 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
895 SmallVectorImpl<LiveInterval*> &NewVRegs) {
896 BitVector LiveBundles, BestBundles;
898 unsigned BestReg = 0;
900 while (unsigned PhysReg = Order.next()) {
901 float Cost = calcInterferenceInfo(VirtReg, PhysReg);
// Local cost alone already exceeds the best total; skip this candidate.
902 if (BestReg && Cost >= BestCost)
905 SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
906 // No live bundles, defer to splitSingleBlocks().
907 if (!LiveBundles.any())
910 Cost += calcGlobalSplitCost(LiveBundles);
911 if (!BestReg || Cost < BestCost) {
914 BestBundles.swap(LiveBundles);
921 splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
926 //===----------------------------------------------------------------------===//
928 //===----------------------------------------------------------------------===//
931 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
932 /// in order to use PhysReg between two entries in SA->UseSlots.
934 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
936 void RAGreedy::calcGapWeights(unsigned PhysReg,
937 SmallVectorImpl<float> &GapWeight) {
938 assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
939 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
940 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
941 const unsigned NumGaps = Uses.size()-1;
943 // Start and end points for the interference check.
944 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
945 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
947 GapWeight.assign(NumGaps, 0.0f);
949 // Add interference from each overlapping register.
950 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
951 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
952 .checkInterference())
955 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
956 // so we don't need InterferenceQuery.
958 // Interference that overlaps an instruction is counted in both gaps
959 // surrounding the instruction. The exception is interference before
960 // StartIdx and after StopIdx.
962 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
963 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
964 // Skip the gaps before IntI.
965 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
966 if (++Gap == NumGaps)
971 // Update the gaps covered by IntI.
// Each overlapped gap records the heaviest interfering weight seen.
972 const float weight = IntI.value()->weight;
973 for (; Gap != NumGaps; ++Gap) {
974 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
975 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
984 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
985 /// before MI that has a slot index. If MI is the first mapped instruction in
986 /// its block, return the block start index instead.
988 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
989 assert(MI && "Missing MachineInstr");
990 const MachineBasicBlock *MBB = MI->getParent();
991 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
// Walk backwards past debug values and copies to the previous real
// instruction with a slot index.
993 if (!(--I)->isDebugValue() && !I->isCopy())
994 return Indexes->getInstructionIndex(I);
995 return Indexes->getMBBStartIdx(MBB);
998 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
999 /// real non-copy instruction for each instruction in SA->UseSlots.
1001 void RAGreedy::calcPrevSlots() {
1002 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1004 PrevSlot.reserve(Uses.size());
1005 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
1006 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
1007 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
1011 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
1012 /// be beneficial to split before UseSlots[i].
1014 /// 0 is always a valid split point
1015 unsigned RAGreedy::nextSplitPoint(unsigned i) {
1016 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1017 const unsigned Size = Uses.size();
1018 assert(i != Size && "No split points after the end");
1019 // Allow split before i when Uses[i] is not adjacent to the previous use.
1020 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
1025 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1028 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1029 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1030 assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
1031 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
1033 // Note that it is possible to have an interval that is live-in or live-out
1034 // while only covering a single block - A phi-def can use undef values from
1035 // predecessors, and the block could be a single-block loop.
1036 // We don't bother doing anything clever about such a case, we simply assume
1037 // that the interval is continuous from FirstUse to LastUse. We should make
1038 // sure that we don't do anything illegal to such an interval, though.
1040 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1041 if (Uses.size() <= 2)
1043 const unsigned NumGaps = Uses.size()-1;
1046 dbgs() << "tryLocalSplit: ";
1047 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1048 dbgs() << ' ' << SA->UseSlots[i];
1052 // For every use, find the previous mapped non-copy instruction.
1053 // We use this to detect valid split points, and to estimate new interval
1057 unsigned BestBefore = NumGaps;
1058 unsigned BestAfter = 0;
1061 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB);
1062 SmallVector<float, 8> GapWeight;
1065 while (unsigned PhysReg = Order.next()) {
1066 // Keep track of the largest spill weight that would need to be evicted in
1067 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1068 calcGapWeights(PhysReg, GapWeight);
1070 // Try to find the best sequence of gaps to close.
1071 // The new spill weight must be larger than any gap interference.
1073 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1074 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1076 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1077 // It is the spill weight that needs to be evicted.
1078 float MaxGap = GapWeight[0];
1079 for (unsigned i = 1; i != SplitAfter; ++i)
1080 MaxGap = std::max(MaxGap, GapWeight[i]);
1083 // Live before/after split?
1084 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1085 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1087 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1088 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1089 << " i=" << MaxGap);
1091 // Stop before the interval gets so big we wouldn't be making progress.
1092 if (!LiveBefore && !LiveAfter) {
1093 DEBUG(dbgs() << " all\n");
1096 // Should the interval be extended or shrunk?
1098 if (MaxGap < HUGE_VALF) {
1099 // Estimate the new spill weight.
1101 // Each instruction reads and writes the register, except the first
1102 // instr doesn't read when !FirstLive, and the last instr doesn't write
1105 // We will be inserting copies before and after, so the total number of
1106 // reads and writes is 2 * EstUses.
1108 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1109 2*(LiveBefore + LiveAfter);
1111 // Try to guess the size of the new interval. This should be trivial,
1112 // but the slot index of an inserted copy can be a lot smaller than the
1113 // instruction it is inserted before if there are many dead indexes
1116 // We measure the distance from the instruction before SplitBefore to
1117 // get a conservative estimate.
1119 // The final distance can still be different if inserting copies
1120 // triggers a slot index renumbering.
1122 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1123 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1124 // Would this split be possible to allocate?
1125 // Never allocate all gaps, we wouldn't be making progress.
1126 float Diff = EstWeight - MaxGap;
1127 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1130 if (Diff > BestDiff) {
1131 DEBUG(dbgs() << " (best)");
1133 BestBefore = SplitBefore;
1134 BestAfter = SplitAfter;
1141 SplitBefore = nextSplitPoint(SplitBefore);
1142 if (SplitBefore < SplitAfter) {
1143 DEBUG(dbgs() << " shrink\n");
1144 // Recompute the max when necessary.
1145 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1146 MaxGap = GapWeight[SplitBefore];
1147 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1148 MaxGap = std::max(MaxGap, GapWeight[i]);
1155 // Try to extend the interval.
1156 if (SplitAfter >= NumGaps) {
1157 DEBUG(dbgs() << " end\n");
1161 DEBUG(dbgs() << " extend\n");
1162 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1163 SplitAfter != e; ++SplitAfter)
1164 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1169 // Didn't find any candidates?
1170 if (BestBefore == NumGaps)
1173 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1174 << '-' << Uses[BestAfter] << ", " << BestDiff
1175 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1177 SmallVector<LiveInterval*, 4> SpillRegs;
1178 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1179 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
1182 SlotIndex SegStart = SE.enterIntvBefore(Uses[BestBefore]);
1183 SlotIndex SegStop = SE.leaveIntvAfter(Uses[BestAfter]);
1184 SE.useIntv(SegStart, SegStop);
1192 //===----------------------------------------------------------------------===//
1193 // Live Range Splitting
1194 //===----------------------------------------------------------------------===//
1196 /// trySplit - Try to split VirtReg or one of its interferences, making it
1198 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1199 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1200 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1201 SA->analyze(&VirtReg);
1203 // Local intervals are handled separately.
1204 if (LIS->intervalIsInOneMBB(VirtReg)) {
1205 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1206 return tryLocalSplit(VirtReg, Order, NewVRegs);
1209 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1211 // First try to split around a region spanning multiple blocks.
1212 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1213 if (PhysReg || !NewVRegs.empty())
1216 // Then isolate blocks with multiple uses.
1217 SplitAnalysis::BlockPtrSet Blocks;
1218 if (SA->getMultiUseBlocks(Blocks)) {
1219 SmallVector<LiveInterval*, 4> SpillRegs;
1220 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1221 SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
1223 MF->verify(this, "After splitting live range around basic blocks");
1226 // Don't assign any physregs.
1231 //===----------------------------------------------------------------------===//
1233 //===----------------------------------------------------------------------===//
1235 /// calcInterferenceWeight - Calculate the combined spill weight of
1236 /// interferences when assigning VirtReg to PhysReg.
1237 float RAGreedy::calcInterferenceWeight(LiveInterval &VirtReg, unsigned PhysReg){
1239 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
1240 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
1241 Q.collectInterferingVRegs();
1242 if (Q.seenUnspillableVReg())
1244 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i)
1245 Sum += Q.interferingVRegs()[i]->weight;
1250 /// trySpillInterferences - Try to spill interfering registers instead of the
1251 /// current one. Only do it if the accumulated spill weight is smaller than the
1252 /// current spill weight.
1253 unsigned RAGreedy::trySpillInterferences(LiveInterval &VirtReg,
1254 AllocationOrder &Order,
1255 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1256 NamedRegionTimer T("Spill Interference", TimerGroupName, TimePassesIsEnabled);
1257 unsigned BestPhys = 0;
1258 float BestWeight = 0;
1261 while (unsigned PhysReg = Order.next()) {
1262 float Weight = calcInterferenceWeight(VirtReg, PhysReg);
1263 if (Weight == HUGE_VALF || Weight >= VirtReg.weight)
1265 if (!BestPhys || Weight < BestWeight)
1266 BestPhys = PhysReg, BestWeight = Weight;
1269 // No candidates found.
1273 // Collect all interfering registers.
1274 SmallVector<LiveInterval*, 8> Spills;
1275 for (const unsigned *AI = TRI->getOverlaps(BestPhys); *AI; ++AI) {
1276 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
1277 Spills.append(Q.interferingVRegs().begin(), Q.interferingVRegs().end());
1278 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
1279 LiveInterval *VReg = Q.interferingVRegs()[i];
1280 unassign(*VReg, *AI);
1285 DEBUG(dbgs() << "spilling " << Spills.size() << " interferences with weight "
1286 << BestWeight << '\n');
1287 for (unsigned i = 0, e = Spills.size(); i != e; ++i)
1288 spiller().spill(Spills[i], NewVRegs, Spills);
1293 //===----------------------------------------------------------------------===//
1295 //===----------------------------------------------------------------------===//
1297 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1298 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1299 // First try assigning a free register.
1300 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1301 while (unsigned PhysReg = Order.next()) {
1302 if (!checkPhysRegInterference(VirtReg, PhysReg))
1306 if (unsigned PhysReg = tryReassign(VirtReg, Order, NewVRegs))
1309 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1312 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1314 // Try splitting VirtReg or interferences.
1315 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1316 if (PhysReg || !NewVRegs.empty())
1319 // Try to spill another interfering reg with less spill weight.
1320 PhysReg = trySpillInterferences(VirtReg, Order, NewVRegs);
1324 // Finally spill VirtReg itself.
1325 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1326 SmallVector<LiveInterval*, 1> pendingSpills;
1327 spiller().spill(&VirtReg, NewVRegs, pendingSpills);
1329 // The live virtual register requesting allocation was spilled, so tell
1330 // the caller not to allocate anything during this round.
1334 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1335 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1336 << "********** Function: "
1337 << ((Value*)mf.getFunction())->getName() << '\n');
1341 MF->verify(this, "Before greedy register allocator");
1343 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1344 Indexes = &getAnalysis<SlotIndexes>();
1345 DomTree = &getAnalysis<MachineDominatorTree>();
1346 ReservedRegs = TRI->getReservedRegs(*MF);
1347 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1348 Loops = &getAnalysis<MachineLoopInfo>();
1349 LoopRanges = &getAnalysis<MachineLoopRanges>();
1350 Bundles = &getAnalysis<EdgeBundles>();
1351 SpillPlacer = &getAnalysis<SpillPlacement>();
1353 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1357 LIS->addKillFlags();
1361 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1362 VRM->rewrite(Indexes);
1365 // The pass output is in VirtRegMap. Release all the transient data.