1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file defines the RAGreedy function pass for register allocation in
// the LLVM compiler infrastructure.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "InterferenceCache.h"
18 #include "LiveDebugVariables.h"
19 #include "LiveRangeEdit.h"
20 #include "RegAllocBase.h"
22 #include "SpillPlacement.h"
24 #include "VirtRegMap.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Function.h"
28 #include "llvm/PassAnalysisSupport.h"
29 #include "llvm/CodeGen/CalcSpillWeights.h"
30 #include "llvm/CodeGen/EdgeBundles.h"
31 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
32 #include "llvm/CodeGen/LiveStackAnalysis.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunctionPass.h"
35 #include "llvm/CodeGen/MachineLoopInfo.h"
36 #include "llvm/CodeGen/MachineLoopRanges.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/Passes.h"
39 #include "llvm/CodeGen/RegAllocRegistry.h"
40 #include "llvm/CodeGen/RegisterCoalescer.h"
41 #include "llvm/Target/TargetOptions.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Support/Timer.h"
51 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
52 STATISTIC(NumLocalSplits, "Number of split local live ranges");
53 STATISTIC(NumEvicted, "Number of interferences evicted");
55 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
56 createGreedyRegisterAllocator);
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
65 BitVector ReservedRegs;
70 MachineDominatorTree *DomTree;
71 MachineLoopInfo *Loops;
72 MachineLoopRanges *LoopRanges;
74 SpillPlacement *SpillPlacer;
77 std::auto_ptr<Spiller> SpillerInstance;
78 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
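  // The queue holds (priority, virtual register number) pairs; the pair with
  // the highest priority value is dequeued first, so enqueue() encodes the
  // allocation order directly in the priority it computes.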
80 // Live ranges pass through a number of stages as we try to allocate them.
81 // Some of the stages may also create new live ranges:
83 // - Region splitting.
84 // - Per-block splitting.
88 // Ranges produced by one of the stages skip the previous stages when they are
89 // dequeued. This improves performance because we can skip interference checks
90 // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
94 RS_New, ///< Never seen before.
95 RS_First, ///< First time in the queue.
96 RS_Second, ///< Second time in the queue.
97 RS_Global, ///< Produced by global splitting.
98 RS_Local, ///< Produced by local splitting.
99 RS_Spill ///< Produced by spilling.
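  /// LRStage - The current stage of each virtual register, indexed by virtual
  /// register number. getStage() and setStage() below interpret the stored
  /// bytes as LiveRangeStage values.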
102 IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
104 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
105 return LiveRangeStage(LRStage[VirtReg.reg]);
108 template<typename Iterator>
109 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
110 LRStage.resize(MRI->getNumVirtRegs());
111 for (;Begin != End; ++Begin) {
112 unsigned Reg = (*Begin)->reg;
113 if (LRStage[Reg] == RS_New)
114 LRStage[Reg] = NewStage;
119 std::auto_ptr<SplitAnalysis> SA;
120 std::auto_ptr<SplitEditor> SE;
122 /// Cached per-block interference maps
123 InterferenceCache IntfCache;
125 /// All basic blocks where the current register has uses.
126 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
128 /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(unsigned Reg) {
      PhysReg = Reg;
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };
  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
144 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
  /// For every instruction in SA->UseSlots, store the previous non-copy
  /// instruction.
  SmallVector<SlotIndex, 8> PrevSlot;
153 /// Return the pass name.
154 virtual const char* getPassName() const {
155 return "Greedy Register Allocator";
158 /// RAGreedy analysis usage.
159 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
160 virtual void releaseMemory();
161 virtual Spiller &spiller() { return *SpillerInstance; }
162 virtual void enqueue(LiveInterval *LI);
163 virtual LiveInterval *dequeue();
164 virtual unsigned selectOrSplit(LiveInterval&,
165 SmallVectorImpl<LiveInterval*>&);
167 /// Perform register allocation.
168 virtual bool runOnMachineFunction(MachineFunction &mf);
173 void LRE_WillEraseInstruction(MachineInstr*);
174 bool LRE_CanEraseVirtReg(unsigned);
175 void LRE_WillShrinkVirtReg(unsigned);
176 void LRE_DidCloneVirtReg(unsigned, unsigned);
178 bool addSplitConstraints(InterferenceCache::Cursor, float&);
179 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
180 void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
181 float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
182 void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
183 SmallVectorImpl<LiveInterval*>&);
184 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
185 SlotIndex getPrevMappedIndex(const MachineInstr*);
186 void calcPrevSlots();
187 unsigned nextSplitPoint(unsigned);
188 bool canEvictInterference(LiveInterval&, unsigned, float&);
190 unsigned tryEvict(LiveInterval&, AllocationOrder&,
191 SmallVectorImpl<LiveInterval*>&);
192 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
193 SmallVectorImpl<LiveInterval*>&);
194 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
195 SmallVectorImpl<LiveInterval*>&);
196 unsigned trySplit(LiveInterval&, AllocationOrder&,
197 SmallVectorImpl<LiveInterval*>&);
199 } // end anonymous namespace
201 char RAGreedy::ID = 0;
203 FunctionPass* llvm::createGreedyRegisterAllocator() {
204 return new RAGreedy();
207 RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
208 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
209 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
210 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
212 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
213 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
214 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
215 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
216 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
217 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
218 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
219 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
220 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
221 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
224 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
225 AU.setPreservesCFG();
226 AU.addRequired<AliasAnalysis>();
227 AU.addPreserved<AliasAnalysis>();
228 AU.addRequired<LiveIntervals>();
229 AU.addRequired<SlotIndexes>();
230 AU.addPreserved<SlotIndexes>();
231 AU.addRequired<LiveDebugVariables>();
232 AU.addPreserved<LiveDebugVariables>();
234 AU.addRequiredID(StrongPHIEliminationID);
235 AU.addRequiredTransitive<RegisterCoalescer>();
236 AU.addRequired<CalculateSpillWeights>();
237 AU.addRequired<LiveStacks>();
238 AU.addPreserved<LiveStacks>();
239 AU.addRequired<MachineDominatorTree>();
240 AU.addPreserved<MachineDominatorTree>();
241 AU.addRequired<MachineLoopInfo>();
242 AU.addPreserved<MachineLoopInfo>();
243 AU.addRequired<MachineLoopRanges>();
244 AU.addPreserved<MachineLoopRanges>();
245 AU.addRequired<VirtRegMap>();
246 AU.addPreserved<VirtRegMap>();
247 AU.addRequired<EdgeBundles>();
248 AU.addRequired<SpillPlacement>();
249 MachineFunctionPass::getAnalysisUsage(AU);
253 //===----------------------------------------------------------------------===//
254 // LiveRangeEdit delegate methods
255 //===----------------------------------------------------------------------===//
257 void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
258 // LRE itself will remove from SlotIndexes and parent basic block.
259 VRM->RemoveMachineInstrFromMaps(MI);
262 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
263 if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
264 unassign(LIS->getInterval(VirtReg), PhysReg);
267 // Unassigned virtreg is probably in the priority queue.
268 // RegAllocBase will erase it after dequeueing.
272 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
273 unsigned PhysReg = VRM->getPhys(VirtReg);
277 // Register is assigned, put it back on the queue for reassignment.
278 LiveInterval &LI = LIS->getInterval(VirtReg);
279 unassign(LI, PhysReg);
283 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
284 // LRE may clone a virtual register because dead code elimination causes it to
285 // be split into connected components. Ensure that the new register gets the
286 // same stage as the parent.
288 LRStage[New] = LRStage[Old];
291 void RAGreedy::releaseMemory() {
292 SpillerInstance.reset(0);
295 RegAllocBase::releaseMemory();
298 void RAGreedy::enqueue(LiveInterval *LI) {
299 // Prioritize live ranges by size, assigning larger ranges first.
300 // The queue holds (size, reg) pairs.
301 const unsigned Size = LI->getSize();
302 const unsigned Reg = LI->reg;
303 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
304 "Can only enqueue virtual registers");
  unsigned Prio;

  if (LRStage[Reg] == RS_New)
    LRStage[Reg] = RS_First;

  if (LRStage[Reg] == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }
326 Queue.push(std::make_pair(Prio, Reg));
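  // Net effect of the priority encoding above: deferred RS_Second ranges
  // always sort below everything else, a physical register hint lifts a range
  // above the unhinted ones, and larger ranges are dequeued before smaller
  // ones.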
329 LiveInterval *RAGreedy::dequeue() {
332 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
337 //===----------------------------------------------------------------------===//
338 // Interference eviction
339 //===----------------------------------------------------------------------===//
/// canEvict - Return true if all interferences between VirtReg and PhysReg can
/// be evicted.
343 /// Return false if any interference is heavier than MaxWeight.
344 /// On return, set MaxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    float &MaxWeight) {
  float Weight = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
351 if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
354 // Check if any interfering live range is heavier than MaxWeight.
355 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
356 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
357 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
359 if (Intf->weight >= MaxWeight)
361 Weight = std::max(Weight, Intf->weight);
368 /// tryEvict - Try to evict all interferences for a physreg.
369 /// @param VirtReg Currently unassigned virtual register.
370 /// @param Order Physregs to try.
371 /// @return Physreg to assign VirtReg, or 0.
372 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
373 AllocationOrder &Order,
374 SmallVectorImpl<LiveInterval*> &NewVRegs){
375 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
377 // Keep track of the lightest single interference seen so far.
378 float BestWeight = VirtReg.weight;
379 unsigned BestPhys = 0;
382 while (unsigned PhysReg = Order.next()) {
383 float Weight = BestWeight;
384 if (!canEvictInterference(VirtReg, PhysReg, Weight))
387 // This is an eviction candidate.
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
                 << Weight << '\n');
    if (BestPhys && Weight >= BestWeight)
      continue;

    // This is the best candidate so far.
    BestWeight = Weight;
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
405 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
406 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
408 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
409 LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      ++NumEvicted;
      NewVRegs.push_back(Intf);
//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//
423 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
424 /// interference pattern in Physreg and its aliases. Add the constraints to
425 /// SpillPlacement and return the static cost of this split in Cost, assuming
426 /// that all preferences in SplitConstraints are met.
427 /// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
430 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
432 // Reset interference dependent info.
433 SplitConstraints.resize(UseBlocks.size());
434 float StaticCost = 0;
435 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
436 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
437 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
439 BC.Number = BI.MBB->getNumber();
440 Intf.moveToBlock(BC.Number);
441 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
442 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
444 if (!Intf.hasInterference())
447 // Number of spill code instructions to insert.
450 // Interference for the live-in value.
452 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
453 BC.Entry = SpillPlacement::MustSpill, ++Ins;
454 else if (Intf.first() < BI.FirstUse)
455 BC.Entry = SpillPlacement::PrefSpill, ++Ins;
456 else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
460 // Interference for the live-out value.
462 if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
463 BC.Exit = SpillPlacement::MustSpill, ++Ins;
464 else if (Intf.last() > BI.LastUse)
465 BC.Exit = SpillPlacement::PrefSpill, ++Ins;
466 else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
470 // Accumulate the total frequency of inserted spill code.
472 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
478 SpillPlacer->addConstraints(SplitConstraints);
479 return SpillPlacer->scanActiveBundles();
483 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
484 /// live-through blocks in Blocks.
485 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
486 ArrayRef<unsigned> Blocks) {
487 const unsigned GroupSize = 8;
488 SpillPlacement::BlockConstraint BCS[GroupSize];
489 unsigned TBS[GroupSize];
490 unsigned B = 0, T = 0;
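  // Constraints (BCS) and link arrays (TBS) are buffered and handed to
  // SpillPlacer in batches of at most GroupSize blocks.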
492 for (unsigned i = 0; i != Blocks.size(); ++i) {
493 unsigned Number = Blocks[i];
494 Intf.moveToBlock(Number);
496 if (!Intf.hasInterference()) {
497 assert(T < GroupSize && "Array overflow");
499 if (++T == GroupSize) {
500 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
506 assert(B < GroupSize && "Array overflow");
507 BCS[B].Number = Number;
509 // Interference for the live-in value.
510 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
511 BCS[B].Entry = SpillPlacement::MustSpill;
513 BCS[B].Entry = SpillPlacement::PrefSpill;
515 // Interference for the live-out value.
516 if (Intf.last() >= SA->getLastSplitPoint(Number))
517 BCS[B].Exit = SpillPlacement::MustSpill;
519 BCS[B].Exit = SpillPlacement::PrefSpill;
521 if (++B == GroupSize) {
522 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
523 SpillPlacer->addConstraints(Array);
528 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
529 SpillPlacer->addConstraints(Array);
530 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
533 void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
534 InterferenceCache::Cursor Intf) {
535 // Keep track of through blocks that have not been added to SpillPlacer.
536 BitVector Todo = SA->getThroughBlocks();
537 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
538 unsigned AddedTo = 0;
540 unsigned Visited = 0;
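  // Keep growing the region: every bundle that SpillPlacer turns positive
  // pulls in its neighboring through blocks, whose constraints may in turn
  // enable more bundles, until no new positive bundles appear.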
544 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
545 if (NewBundles.empty())
547 // Find new through blocks in the periphery of PrefRegBundles.
548 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
549 unsigned Bundle = NewBundles[i];
550 // Look at all blocks connected to Bundle in the full graph.
551 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
552 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
555 if (!Todo.test(Block))
558 // This is a new through block. Add it to SpillPlacer later.
559 ActiveBlocks.push_back(Block);
565 // Any new blocks to add?
566 if (ActiveBlocks.size() > AddedTo) {
567 ArrayRef<unsigned> Add(&ActiveBlocks[AddedTo],
568 ActiveBlocks.size() - AddedTo);
569 addThroughConstraints(Intf, Add);
570 AddedTo = ActiveBlocks.size();
572 // Perhaps iterating can enable more bundles?
573 SpillPlacer->iterate();
575 DEBUG(dbgs() << ", v=" << Visited);
578 /// calcGlobalSplitCost - Return the global split cost of following the split
579 /// pattern in LiveBundles. This cost should be added to the local cost of the
580 /// interference pattern in SplitConstraints.
582 float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
583 InterferenceCache::Cursor Intf) {
584 float GlobalCost = 0;
585 const BitVector &LiveBundles = Cand.LiveBundles;
586 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
587 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
588 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
589 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
590 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
591 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
595 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
597 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
599 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
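  // Add the cost of live-through blocks: blocks that keep the register on only
  // one side need a single spill or reload, and blocks that keep it on both
  // sides pay double if they also have interference.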
602 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
603 unsigned Number = Cand.ActiveBlocks[i];
604 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
605 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
606 if (!RegIn && !RegOut)
608 if (RegIn && RegOut) {
609 // We need double spill code if this block has interference.
610 Intf.moveToBlock(Number);
611 if (Intf.hasInterference())
612 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
615 // live-in / stack-out or stack-in live-out.
616 GlobalCost += SpillPlacer->getBlockFrequency(Number);
621 /// splitAroundRegion - Split VirtReg around the region determined by
622 /// LiveBundles. Make an effort to avoid interference from PhysReg.
624 /// The 'register' interval is going to contain as many uses as possible while
625 /// avoiding interference. The 'stack' interval is the complement constructed by
626 /// SplitEditor. It will contain the rest.
628 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
629 GlobalSplitCandidate &Cand,
630 SmallVectorImpl<LiveInterval*> &NewVRegs) {
631 const BitVector &LiveBundles = Cand.LiveBundles;
634 dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
636 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
637 dbgs() << " EB#" << i;
641 InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
642 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
645 // Create the main cross-block interval.
646 const unsigned MainIntv = SE->openIntv();
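  // MainIntv is the cross-block interval that will hold the uses assigned to
  // the register side of the split; everything else falls into the complement
  // (stack) interval constructed by SplitEditor.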
648 // First add all defs that are live out of a block.
649 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
650 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
651 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
652 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
653 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
655 // Create separate intervals for isolated blocks with multiple uses.
656 if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) {
657 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
658 SE->splitSingleBlock(BI);
659 SE->selectIntv(MainIntv);
663 // Should the register be live out?
664 if (!BI.LiveOut || !RegOut)
667 SlotIndex Start, Stop;
668 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
669 Intf.moveToBlock(BI.MBB->getNumber());
670 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
671 << Bundles->getBundle(BI.MBB->getNumber(), 1)
672 << " [" << Start << ';'
673 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
674 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');
676 // The interference interval should either be invalid or overlap MBB.
677 assert((!Intf.hasInterference() || Intf.first() < Stop)
678 && "Bad interference");
679 assert((!Intf.hasInterference() || Intf.last() > Start)
680 && "Bad interference");
682 // Check interference leaving the block.
683 if (!Intf.hasInterference()) {
684 // Block is interference-free.
685 DEBUG(dbgs() << ", no interference");
686 if (!BI.LiveThrough) {
687 DEBUG(dbgs() << ", not live-through.\n");
688 SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
692 // Block is live-through, but entry bundle is on the stack.
693 // Reload just before the first use.
694 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
695 SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
698 DEBUG(dbgs() << ", live-through.\n");
702 // Block has interference.
703 DEBUG(dbgs() << ", interference to " << Intf.last());
705 if (!BI.LiveThrough && Intf.last() <= BI.Def) {
706 // The interference doesn't reach the outgoing segment.
707 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
708 SE->useIntv(BI.Def, Stop);
712 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
713 if (Intf.last().getBoundaryIndex() < BI.LastUse) {
714 // There are interference-free uses at the end of the block.
715 // Find the first use that can get the live-out register.
716 SmallVectorImpl<SlotIndex>::const_iterator UI =
717 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
718 Intf.last().getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      SlotIndex Use = *UI;
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
723 if (Use.getBaseIndex() <= LastSplitPoint) {
724 DEBUG(dbgs() << ", free use at " << Use << ".\n");
725 SlotIndex SegStart = SE->enterIntvBefore(Use);
726 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
727 assert(SegStart < LastSplitPoint && "Impossible split point");
728 SE->useIntv(SegStart, Stop);
733 // Interference is after the last use.
734 DEBUG(dbgs() << " after last use.\n");
735 SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
736 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
739 // Now all defs leading to live bundles are handled, do everything else.
740 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
741 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
742 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
743 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
745 // Is the register live-in?
746 if (!BI.LiveIn || !RegIn)
749 // We have an incoming register. Check for interference.
750 SlotIndex Start, Stop;
751 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
752 Intf.moveToBlock(BI.MBB->getNumber());
753 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
754 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
755 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
758 // Check interference entering the block.
759 if (!Intf.hasInterference()) {
760 // Block is interference-free.
761 DEBUG(dbgs() << ", no interference");
762 if (!BI.LiveThrough) {
763 DEBUG(dbgs() << ", killed in block.\n");
764 SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
768 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
769 // Block is live-through, but exit bundle is on the stack.
770 // Spill immediately after the last use.
771 if (BI.LastUse < LastSplitPoint) {
772 DEBUG(dbgs() << ", uses, stack-out.\n");
773 SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
776 // The last use is after the last split point, it is probably an
778 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
779 << LastSplitPoint << ", stack-out.\n");
780 SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
781 SE->useIntv(Start, SegEnd);
782 // Run a double interval from the split to the last use.
783 // This makes it possible to spill the complement without affecting the
785 SE->overlapIntv(SegEnd, BI.LastUse);
788 // Register is live-through.
789 DEBUG(dbgs() << ", uses, live-through.\n");
790 SE->useIntv(Start, Stop);
794 // Block has interference.
795 DEBUG(dbgs() << ", interference from " << Intf.first());
797 if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
798 // The interference doesn't reach the outgoing segment.
799 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
800 SE->useIntv(Start, BI.Kill);
804 if (Intf.first().getBaseIndex() > BI.FirstUse) {
805 // There are interference-free uses at the beginning of the block.
806 // Find the last use that can get the register.
807 SmallVectorImpl<SlotIndex>::const_iterator UI =
808 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
809 Intf.first().getBaseIndex());
810 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
811 SlotIndex Use = (--UI)->getBoundaryIndex();
812 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
813 SlotIndex SegEnd = SE->leaveIntvAfter(Use);
814 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
815 SE->useIntv(Start, SegEnd);
819 // Interference is before the first use.
820 DEBUG(dbgs() << " before first use.\n");
821 SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
822 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
825 // Handle live-through blocks.
826 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
827 unsigned Number = Cand.ActiveBlocks[i];
828 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
829 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
830 DEBUG(dbgs() << "Live through BB#" << Number << '\n');
831 if (RegIn && RegOut) {
832 Intf.moveToBlock(Number);
833 if (!Intf.hasInterference()) {
834 SE->useIntv(Indexes->getMBBStartIdx(Number),
835 Indexes->getMBBEndIdx(Number));
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    if (RegIn)
      SE->leaveIntvAtTop(*MBB);
    if (RegOut)
      SE->enterIntvAtEnd(*MBB);
848 // FIXME: Should we be more aggressive about splitting the stack region into
849 // per-block segments? The current approach allows the stack region to
850 // separate into connected components. Some components may be allocatable.
855 MF->verify(this, "After splitting live range around region");
858 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
859 SmallVectorImpl<LiveInterval*> &NewVRegs) {
861 const unsigned NoCand = ~0u;
862 unsigned BestCand = NoCand;
865 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
866 if (GlobalCand.size() <= Cand)
867 GlobalCand.resize(Cand+1);
868 GlobalCand[Cand].reset(PhysReg);
870 SpillPlacer->prepare(GlobalCand[Cand].LiveBundles);
872 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
873 if (!addSplitConstraints(Intf, Cost)) {
874 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
877 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
878 if (BestCand != NoCand && Cost >= BestCost) {
879 DEBUG(dbgs() << " worse than "
880 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n');
883 growRegion(GlobalCand[Cand], Intf);
885 SpillPlacer->finish();
887 // No live bundles, defer to splitSingleBlocks().
888 if (!GlobalCand[Cand].LiveBundles.any()) {
889 DEBUG(dbgs() << " no bundles.\n");
893 Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf);
895 dbgs() << ", total = " << Cost << " with bundles";
896 for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0;
897 i = GlobalCand[Cand].LiveBundles.find_next(i))
898 dbgs() << " EB#" << i;
    if (BestCand == NoCand || Cost < BestCost) {
      BestCand = Cand;
      BestCost = 0.98f * Cost; // Prevent rounding effects.
907 if (BestCand == NoCand)
910 splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
911 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
//===----------------------------------------------------------------------===//
//                               Local Splitting
//===----------------------------------------------------------------------===//
921 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
922 /// in order to use PhysReg between two entries in SA->UseSlots.
924 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
926 void RAGreedy::calcGapWeights(unsigned PhysReg,
927 SmallVectorImpl<float> &GapWeight) {
928 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
929 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
930 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
931 const unsigned NumGaps = Uses.size()-1;
933 // Start and end points for the interference check.
934 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
935 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
937 GapWeight.assign(NumGaps, 0.0f);
939 // Add interference from each overlapping register.
940 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
941 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
942 .checkInterference())
945 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
946 // so we don't need InterferenceQuery.
948 // Interference that overlaps an instruction is counted in both gaps
949 // surrounding the instruction. The exception is interference before
950 // StartIdx and after StopIdx.
952 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
953 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
954 // Skip the gaps before IntI.
955 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
956 if (++Gap == NumGaps)
961 // Update the gaps covered by IntI.
962 const float weight = IntI.value()->weight;
963 for (; Gap != NumGaps; ++Gap) {
964 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
965 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
974 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
975 /// before MI that has a slot index. If MI is the first mapped instruction in
976 /// its block, return the block start index instead.
978 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
979 assert(MI && "Missing MachineInstr");
980 const MachineBasicBlock *MBB = MI->getParent();
981 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
  while (I != B)
    if (!(--I)->isDebugValue() && !I->isCopy())
984 return Indexes->getInstructionIndex(I);
985 return Indexes->getMBBStartIdx(MBB);
988 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
989 /// real non-copy instruction for each instruction in SA->UseSlots.
991 void RAGreedy::calcPrevSlots() {
992 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
994 PrevSlot.reserve(Uses.size());
995 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
996 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
997 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
1001 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
1002 /// be beneficial to split before UseSlots[i].
1004 /// 0 is always a valid split point
1005 unsigned RAGreedy::nextSplitPoint(unsigned i) {
1006 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1007 const unsigned Size = Uses.size();
1008 assert(i != Size && "No split points after the end");
1009 // Allow split before i when Uses[i] is not adjacent to the previous use.
  while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
    ;
  return i;
1015 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1018 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1019 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1020 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1021 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1023 // Note that it is possible to have an interval that is live-in or live-out
1024 // while only covering a single block - A phi-def can use undef values from
1025 // predecessors, and the block could be a single-block loop.
1026 // We don't bother doing anything clever about such a case, we simply assume
1027 // that the interval is continuous from FirstUse to LastUse. We should make
1028 // sure that we don't do anything illegal to such an interval, though.
1030 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1031 if (Uses.size() <= 2)
1033 const unsigned NumGaps = Uses.size()-1;
1036 dbgs() << "tryLocalSplit: ";
1037 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1038 dbgs() << ' ' << SA->UseSlots[i];
1042 // For every use, find the previous mapped non-copy instruction.
  // We use this to detect valid split points, and to estimate new interval
  // sizes.
  calcPrevSlots();
1047 unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;
1051 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
1052 SmallVector<float, 8> GapWeight;
1055 while (unsigned PhysReg = Order.next()) {
1056 // Keep track of the largest spill weight that would need to be evicted in
1057 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1058 calcGapWeights(PhysReg, GapWeight);
1060 // Try to find the best sequence of gaps to close.
1061 // The new spill weight must be larger than any gap interference.
1063 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1064 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
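    // The search below keeps a window [SplitBefore;SplitAfter] of uses and
    // either shrinks it from the left or extends it to the right, looking for
    // the window with the best estimated weight vs. interference trade-off.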
1066 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1067 // It is the spill weight that needs to be evicted.
1068 float MaxGap = GapWeight[0];
1069 for (unsigned i = 1; i != SplitAfter; ++i)
1070 MaxGap = std::max(MaxGap, GapWeight[i]);
1073 // Live before/after split?
1074 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1075 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1077 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1078 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1079 << " i=" << MaxGap);
1081 // Stop before the interval gets so big we wouldn't be making progress.
1082 if (!LiveBefore && !LiveAfter) {
1083 DEBUG(dbgs() << " all\n");
1086 // Should the interval be extended or shrunk?
1088 if (MaxGap < HUGE_VALF) {
1089 // Estimate the new spill weight.
1091 // Each instruction reads and writes the register, except the first
1092 // instr doesn't read when !FirstLive, and the last instr doesn't write
1095 // We will be inserting copies before and after, so the total number of
1096 // reads and writes is 2 * EstUses.
1098 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1099 2*(LiveBefore + LiveAfter);
1101 // Try to guess the size of the new interval. This should be trivial,
1102 // but the slot index of an inserted copy can be a lot smaller than the
1103 // instruction it is inserted before if there are many dead indexes
1106 // We measure the distance from the instruction before SplitBefore to
1107 // get a conservative estimate.
1109 // The final distance can still be different if inserting copies
1110 // triggers a slot index renumbering.
1112 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1113 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1114 // Would this split be possible to allocate?
1115 // Never allocate all gaps, we wouldn't be making progress.
1116 float Diff = EstWeight - MaxGap;
1117 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1120 if (Diff > BestDiff) {
        DEBUG(dbgs() << " (best)");
        BestDiff = Diff;
        BestBefore = SplitBefore;
        BestAfter = SplitAfter;
1131 SplitBefore = nextSplitPoint(SplitBefore);
1132 if (SplitBefore < SplitAfter) {
1133 DEBUG(dbgs() << " shrink\n");
1134 // Recompute the max when necessary.
1135 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1136 MaxGap = GapWeight[SplitBefore];
1137 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1138 MaxGap = std::max(MaxGap, GapWeight[i]);
1145 // Try to extend the interval.
1146 if (SplitAfter >= NumGaps) {
1147 DEBUG(dbgs() << " end\n");
1151 DEBUG(dbgs() << " extend\n");
1152 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1153 SplitAfter != e; ++SplitAfter)
1154 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1159 // Didn't find any candidates?
1160 if (BestBefore == NumGaps)
1163 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1164 << '-' << Uses[BestAfter] << ", " << BestDiff
1165 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1167 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1171 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1172 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1173 SE->useIntv(SegStart, SegStop);
1176 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
1182 //===----------------------------------------------------------------------===//
1183 // Live Range Splitting
1184 //===----------------------------------------------------------------------===//
/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
1188 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1189 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1190 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1191 // Local intervals are handled separately.
1192 if (LIS->intervalIsInOneMBB(VirtReg)) {
1193 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1194 SA->analyze(&VirtReg);
1195 return tryLocalSplit(VirtReg, Order, NewVRegs);
1198 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1200 // Don't iterate global splitting.
1201 // Move straight to spilling if this range was produced by a global split.
1202 if (getStage(VirtReg) >= RS_Global)
1205 SA->analyze(&VirtReg);
1207 // First try to split around a region spanning multiple blocks.
1208 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1209 if (PhysReg || !NewVRegs.empty())
1212 // Then isolate blocks with multiple uses.
1213 SplitAnalysis::BlockPtrSet Blocks;
1214 if (SA->getMultiUseBlocks(Blocks)) {
1215 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1217 SE->splitSingleBlocks(Blocks);
1218 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
1220 MF->verify(this, "After splitting live range around basic blocks");
1223 // Don't assign any physregs.
//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
1232 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1233 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1234 // First try assigning a free register.
1235 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1236 while (unsigned PhysReg = Order.next()) {
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      return PhysReg;
  }

  if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
    return PhysReg;
1244 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1246 // The first time we see a live range, don't try to split or spill.
1247 // Wait until the second time, when all smaller ranges have been allocated.
1248 // This gives a better picture of the interference to split around.
1249 LiveRangeStage Stage = getStage(VirtReg);
1250 if (Stage == RS_First) {
1251 LRStage[VirtReg.reg] = RS_Second;
1252 DEBUG(dbgs() << "wait for second round\n");
1253 NewVRegs.push_back(&VirtReg);
1257 assert(Stage < RS_Spill && "Cannot allocate after spilling");
1259 // Try splitting VirtReg or interferences.
1260 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;
1264 // Finally spill VirtReg itself.
1265 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1266 LiveRangeEdit LRE(VirtReg, NewVRegs, this);
1267 spiller().spill(LRE);
1268 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
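  // Ranges created by the spiller are marked RS_Spill so they are never
  // considered for further splitting or spilling.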
1271 MF->verify(this, "After spilling");
1273 // The live virtual register requesting allocation was spilled, so tell
1274 // the caller not to allocate anything during this round.
1278 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1279 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1280 << "********** Function: "
1281 << ((Value*)mf.getFunction())->getName() << '\n');
  MF = &mf;
  MF->verify(this, "Before greedy register allocator");
1287 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1288 Indexes = &getAnalysis<SlotIndexes>();
1289 DomTree = &getAnalysis<MachineDominatorTree>();
1290 ReservedRegs = TRI->getReservedRegs(*MF);
1291 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1292 Loops = &getAnalysis<MachineLoopInfo>();
1293 LoopRanges = &getAnalysis<MachineLoopRanges>();
1294 Bundles = &getAnalysis<EdgeBundles>();
1295 SpillPlacer = &getAnalysis<SpillPlacement>();
1297 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1298 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
1300 LRStage.resize(MRI->getNumVirtRegs());
1301 IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
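  // The interference cache reads block-by-block interference straight out of
  // the live interval unions maintained by RegAllocBase.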
  allocatePhysRegs();
  LIS->addKillFlags();
1309 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1310 VRM->rewrite(Indexes);
1313 // Write out new DBG_VALUE instructions.
1314 getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
1316 // The pass output is in VirtRegMap. Release all the transient data.