//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");
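
// Register the greedy allocator with the register allocator registry so it
// can be selected on the command line with -regalloc=greedy.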
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
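  // Context and cached analysis results, all initialized in
  // runOnMachineFunction().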
  MachineFunction *MF;
  BitVector ReservedRegs;

  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;

  std::auto_ptr<Spiller> SpillerInstance;
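  /// Priority queue of (priority, virtreg number) pairs. std::priority_queue
  /// pops the largest pair first, so higher priorities are dequeued earlier.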
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    RS_New,      ///< Never seen before.
    RS_First,    ///< First time in the queue.
    RS_Second,   ///< Second time in the queue.
    RS_Global,   ///< Produced by global splitting.
    RS_Local,    ///< Produced by local splitting.
    RS_Spill     ///< Produced by spilling.
  };
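
  /// LRStage - The current stage of each virtual register, indexed by virtual
  /// register number. New registers implicitly start in RS_New.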
  IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return LiveRangeStage(LRStage[VirtReg.reg]);
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    LRStage.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (LRStage[Reg] == RS_New)
        LRStage[Reg] = NewStage;
    }
  }

  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Live-through blocks that have already been added to SpillPlacer.
  SparseBitVector<> ActiveThroughBlocks;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    BitVector LiveBundles;
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  /// For every instruction in SA->UseSlots, store the previous non-copy
  /// instruction.
  SmallVector<SlotIndex, 8> PrevSlot;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(InterferenceCache::Cursor);
  float calcGlobalSplitCost(unsigned, const BitVector&);
  void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  SlotIndex getPrevMappedIndex(const MachineInstr*);
  void calcPrevSlots();
  unsigned nextSplitPoint(unsigned);
  bool canEvictInterference(LiveInterval&, unsigned, float&);

  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. Ensure that the new register gets the
  // same stage as the parent.
  LRStage.grow(New);
  LRStage[New] = LRStage[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  LRStage.grow(Reg);
  if (LRStage[Reg] == RS_New)
    LRStage[Reg] = RS_First;

  if (LRStage[Reg] == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// canEvictInterference - Return true if all interferences between VirtReg
/// and PhysReg can be evicted.
/// Return false if any interference is heavier than MaxWeight.
/// On return, set MaxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    float &MaxWeight) {
  float Weight = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      if (Intf->weight >= MaxWeight)
        return false;
      Weight = std::max(Weight, Intf->weight);
    }
  }
  MaxWeight = Weight;
  return true;
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs){
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the lightest single interference seen so far.
  float BestWeight = VirtReg.weight;
  unsigned BestPhys = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    float Weight = BestWeight;
    if (!canEvictInterference(VirtReg, PhysReg, Weight))
      continue;

    // This is an eviction candidate.
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
                 << Weight << '\n');
    if (BestPhys && Weight >= BestWeight)
      continue;

    // Best so far.
    BestPhys = PhysReg;
    BestWeight = Weight;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
  for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
}
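
/// growRegion - Grow the split region by adding live-through blocks around
/// the positive (prefer-register) bundles reported by SpillPlacement,
/// iterating until no new positive bundles appear.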
void RAGreedy::growRegion(InterferenceCache::Cursor Intf) {
  // Keep track of through blocks that have already been added to SpillPlacer.
  SparseBitVector<> Added;
  SmallVector<unsigned, 16> ThroughBlocks;
  unsigned Visited = 0;

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    if (NewBundles.empty())
      break;
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!SA->isThroughBlock(Block) || !Added.test_and_set(Block))
          continue;
        // This is a new through block. Add it to SpillPlacer later.
        ThroughBlocks.push_back(Block);
        ++Visited;
      }
    }
    // Any new blocks to add?
    if (!ThroughBlocks.empty()) {
      addThroughConstraints(Intf, ThroughBlocks);
      ThroughBlocks.clear();
    }
    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }

  // Remember the relevant set of through blocks for splitAroundRegion().
  ActiveThroughBlocks |= Added;
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(unsigned PhysReg,
                                    const BitVector &LiveBundles) {
  float GlobalCost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  InterferenceCache::Cursor Intf(IntfCache, PhysReg);
  for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
       E = ActiveThroughBlocks.end(); I != E; ++I) {
    unsigned Number = *I;
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Intf.moveToBlock(Number);
      if (Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
                                 const BitVector &LiveBundles,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor Intf(IntfCache, PhysReg);
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First add all defs that are live out of a block.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      SE->splitSingleBlock(BI);
      SE->selectIntv(MainIntv);
      continue;
    }

    // Should the register be live out?
    if (!BI.LiveOut || !RegOut)
      continue;

    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " [" << Start << ';'
                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
                 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');

    // The interference interval should either be invalid or overlap MBB.
    assert((!Intf.hasInterference() || Intf.first() < Stop)
           && "Bad interference");
    assert((!Intf.hasInterference() || Intf.last() > Start)
           && "Bad interference");

    // Check interference leaving the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", not live-through.\n");
        SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
        continue;
      }
      if (!RegIn) {
        // Block is live-through, but entry bundle is on the stack.
        // Reload just before the first use.
        DEBUG(dbgs() << ", not live-in, enter before first use.\n");
        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
        continue;
      }
      DEBUG(dbgs() << ", live-through.\n");
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference to " << Intf.last());

    if (!BI.LiveThrough && Intf.last() <= BI.Def) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
      SE->useIntv(BI.Def, Stop);
      continue;
    }

    SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
    if (Intf.last().getBoundaryIndex() < BI.LastUse) {
      // There are interference-free uses at the end of the block.
      // Find the first use that can get the live-out register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.last().getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      SlotIndex Use = *UI;
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
      if (Use.getBaseIndex() <= LastSplitPoint) {
        DEBUG(dbgs() << ", free use at " << Use << ".\n");
        SlotIndex SegStart = SE->enterIntvBefore(Use);
        assert(SegStart >= Intf.last() && "Couldn't avoid interference");
        assert(SegStart < LastSplitPoint && "Impossible split point");
        SE->useIntv(SegStart, Stop);
        continue;
      }
    }

    // Interference is after the last use.
    DEBUG(dbgs() << " after last use.\n");
    SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= Intf.last() && "Couldn't avoid interference");
  }

  // Now all defs leading to live bundles are handled, do everything else.
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Is the register live-in?
    if (!BI.LiveIn || !RegIn)
      continue;

    // We have an incoming register. Check for interference.
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
                 << ')');

    // Check interference entering the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", killed in block.\n");
        SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
        continue;
      }
      if (!RegOut) {
        SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
        // Block is live-through, but exit bundle is on the stack.
        // Spill immediately after the last use.
        if (BI.LastUse < LastSplitPoint) {
          DEBUG(dbgs() << ", uses, stack-out.\n");
          SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
          continue;
        }
        // The last use is after the last split point, it is probably an
        // indirect branch.
        DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
                     << LastSplitPoint << ", stack-out.\n");
        SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
        SE->useIntv(Start, SegEnd);
        // Run a double interval from the split to the last use.
        // This makes it possible to spill the complement without affecting the
        // indirect branch.
        SE->overlapIntv(SegEnd, BI.LastUse);
        continue;
      }
      // Register is live-through.
      DEBUG(dbgs() << ", uses, live-through.\n");
      SE->useIntv(Start, Stop);
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference from " << Intf.first());

    if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
      SE->useIntv(Start, BI.Kill);
      continue;
    }

    if (Intf.first().getBaseIndex() > BI.FirstUse) {
      // There are interference-free uses at the beginning of the block.
      // Find the last use that can get the register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.first().getBaseIndex());
      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
      SlotIndex Use = (--UI)->getBoundaryIndex();
      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
      SlotIndex SegEnd = SE->leaveIntvAfter(Use);
      assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
      SE->useIntv(Start, SegEnd);
      continue;
    }

    // Interference is before the first use.
    DEBUG(dbgs() << " before first use.\n");
    SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
  }

  // Handle live-through blocks.
  for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
       E = ActiveThroughBlocks.end(); I != E; ++I) {
    unsigned Number = *I;
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    DEBUG(dbgs() << "Live through BB#" << Number << '\n');
    if (RegIn && RegOut) {
      Intf.moveToBlock(Number);
      if (!Intf.hasInterference()) {
        SE->useIntv(Indexes->getMBBStartIdx(Number),
                    Indexes->getMBBEndIdx(Number));
        continue;
      }
    }
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    if (RegIn)
      SE->leaveIntvAtTop(*MBB);
    if (RegOut)
      SE->enterIntvAtEnd(*MBB);
  }

  // FIXME: Should we be more aggressive about splitting the stack region into
  // per-block segments? The current approach allows the stack region to
  // separate into connected components. Some components may be allocatable.
  SE->finish();
  ++NumGlobalSplits;

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}
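
/// tryRegionSplit - Try to split VirtReg into a region-based 'register'
/// interval and a 'stack' complement. For each candidate PhysReg in Order,
/// compute the split cost with SpillPlacement and keep the cheapest; the
/// split itself is performed by splitAroundRegion. Returns 0 and puts the new
/// live ranges in NewVRegs.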
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  BitVector LiveBundles, BestBundles;
  float BestCost = 0;
  unsigned BestReg = 0;
  ActiveThroughBlocks.clear();

  Order.rewind();
  for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
    if (GlobalCand.size() <= Cand)
      GlobalCand.resize(Cand+1);
    GlobalCand[Cand].PhysReg = PhysReg;

    SpillPlacer->prepare(LiveBundles);
    float Cost;
    InterferenceCache::Cursor Intf(IntfCache, PhysReg);
    if (!addSplitConstraints(Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (BestReg && Cost >= BestCost) {
      DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
      continue;
    }
    growRegion(Intf);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(PhysReg, LiveBundles);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (!BestReg || Cost < BestCost) {
      BestReg = PhysReg;
      BestCost = 0.98f * Cost; // Prevent rounding effects.
      BestBundles.swap(LiveBundles);
    }
  }

  if (!BestReg)
    return 0;

  splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
/// before MI that has a slot index. If MI is the first mapped instruction in
/// its block, return the block start index instead.
///
SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
  assert(MI && "Missing MachineInstr");
  const MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
  while (I != B)
    if (!(--I)->isDebugValue() && !I->isCopy())
      return Indexes->getInstructionIndex(I);
  return Indexes->getMBBStartIdx(MBB);
}

/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
/// real non-copy instruction for each instruction in SA->UseSlots.
///
void RAGreedy::calcPrevSlots() {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  PrevSlot.clear();
  PrevSlot.reserve(Uses.size());
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
    PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
  }
}

/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
/// be beneficial to split before UseSlots[i].
///
/// 0 is always a valid split point
unsigned RAGreedy::nextSplitPoint(unsigned i) {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned Size = Uses.size();
  assert(i != Size && "No split points after the end");
  // Allow split before i when Uses[i] is not adjacent to the previous use.
  while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
    ;
  return i;
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // For every use, find the previous mapped non-copy instruction.
  // We use this to detect valid split points, and to estimate new interval
  // sizes.
  calcPrevSlots();

  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];
    for (unsigned i = 1; i != SplitAfter; ++i)
      MaxGap = std::max(MaxGap, GapWeight[i]);

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }

      // Should the interval be extended or shrunk?
      bool Shrink = true;
      if (MaxGap < HUGE_VALF) {
        // Estimate the new spill weight.
        //
        // Each instruction reads and writes the register, except the first
        // instr doesn't read when !FirstLive, and the last instr doesn't write
        // when !LastLive.
        //
        // We will be inserting copies before and after, so the total number of
        // reads and writes is 2 * EstUses.
        //
        const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
                                 2*(LiveBefore + LiveAfter);

        // Try to guess the size of the new interval. This should be trivial,
        // but the slot index of an inserted copy can be a lot smaller than the
        // instruction it is inserted before if there are many dead indexes
        // between them.
        //
        // We measure the distance from the instruction before SplitBefore to
        // get a conservative estimate.
        //
        // The final distance can still be different if inserting copies
        // triggers a slot index renumbering.
        //
        const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
                              PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        float Diff = EstWeight - MaxGap;
        DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
        if (Diff > 0) {
          Shrink = false;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        SplitBefore = nextSplitPoint(SplitBefore);
        if (SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
           SplitAfter != e; ++SplitAfter)
        MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SE->finish();
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)
    return 0;

  SA->analyze(&VirtReg);

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
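
/// selectOrSplit - Allocation driver for a single live range: try a free
/// physreg first, then eviction, then splitting, and finally spill VirtReg.
/// Returns the assigned physreg, or 0 when new live ranges were created in
/// NewVRegs instead.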
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
  while (unsigned PhysReg = Order.next()) {
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      return PhysReg;
  }

  if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
    return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  LiveRangeStage Stage = getStage(VirtReg);
  if (Stage == RS_First) {
    LRStage[VirtReg.reg] = RS_Second;
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  assert(Stage < RS_Spill && "Cannot allocate after spilling");

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ReservedRegs = TRI->getReservedRegs(*MF);
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));

  LRStage.resize(MRI->getNumVirtRegs());
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}