1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in optimized builds.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "InterferenceCache.h"
18 #include "LiveDebugVariables.h"
19 #include "LiveRangeEdit.h"
20 #include "RegAllocBase.h"
22 #include "SpillPlacement.h"
24 #include "VirtRegMap.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Function.h"
28 #include "llvm/PassAnalysisSupport.h"
29 #include "llvm/CodeGen/CalcSpillWeights.h"
30 #include "llvm/CodeGen/EdgeBundles.h"
31 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
32 #include "llvm/CodeGen/LiveStackAnalysis.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunctionPass.h"
35 #include "llvm/CodeGen/MachineLoopInfo.h"
36 #include "llvm/CodeGen/MachineLoopRanges.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/Passes.h"
39 #include "llvm/CodeGen/RegAllocRegistry.h"
40 #include "llvm/CodeGen/RegisterCoalescer.h"
41 #include "llvm/Target/TargetOptions.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Support/Timer.h"
51 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
52 STATISTIC(NumLocalSplits, "Number of split local live ranges");
53 STATISTIC(NumEvicted, "Number of interferences evicted");
55 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
56 createGreedyRegisterAllocator);
59 class RAGreedy : public MachineFunctionPass,
61 private LiveRangeEdit::Delegate {
69 MachineDominatorTree *DomTree;
70 MachineLoopInfo *Loops;
71 MachineLoopRanges *LoopRanges;
73 SpillPlacement *SpillPlacer;
74 LiveDebugVariables *DebugVars;
77 std::auto_ptr<Spiller> SpillerInstance;
78 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
80 // Live ranges pass through a number of stages as we try to allocate them.
81 // Some of the stages may also create new live ranges:
83 // - Region splitting.
84 // - Per-block splitting.
88 // Ranges produced by one of the stages skip the previous stages when they are
89 // dequeued. This improves performance because we can skip interference checks
90 // that are unlikely to give any results. It also guarantees that the live
91 // range splitting algorithm terminates, something that is otherwise hard to ensure.
94 RS_New, ///< Never seen before.
95 RS_First, ///< First time in the queue.
96 RS_Second, ///< Second time in the queue.
97 RS_Global, ///< Produced by global splitting.
98 RS_Local, ///< Produced by local splitting.
99 RS_Spill ///< Produced by spilling.
102 static const char *const StageName[];
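// Current stage of each virtual register, indexed by virtreg number. New
// registers default to RS_New (see the constructor's LRStage(RS_New) init).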
104 IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
106 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
107 return LiveRangeStage(LRStage[VirtReg.reg]);
110 template<typename Iterator>
111 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
112 LRStage.resize(MRI->getNumVirtRegs());
113 for (;Begin != End; ++Begin) {
114 unsigned Reg = (*Begin)->reg;
115 if (LRStage[Reg] == RS_New)
116 LRStage[Reg] = NewStage;
120 // Eviction. Sometimes an assigned live range can be evicted without
121 // conditions, but other times it must be split after being evicted to avoid infinite eviction loops.
124 CE_Never, ///< Can never evict.
125 CE_Always, ///< Can always evict.
126 CE_WithSplit ///< Can evict only if range is also split or spilled.
130 std::auto_ptr<SplitAnalysis> SA;
131 std::auto_ptr<SplitEditor> SE;
133 /// Cached per-block interference maps
134 InterferenceCache IntfCache;
136 /// All basic blocks where the current register has uses.
137 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
139 /// Global live range splitting candidate info.
140 struct GlobalSplitCandidate {
142 BitVector LiveBundles;
143 SmallVector<unsigned, 8> ActiveBlocks;
145 void reset(unsigned Reg) {
148 ActiveBlocks.clear();
152 /// Candidate info for each PhysReg in AllocationOrder.
153 /// This vector never shrinks, but grows to the size of the largest register
155 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
157 /// For every instruction in SA->UseSlots, store the previous non-copy instruction.
159 SmallVector<SlotIndex, 8> PrevSlot;
164 /// Return the pass name.
165 virtual const char* getPassName() const {
166 return "Greedy Register Allocator";
169 /// RAGreedy analysis usage.
170 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
171 virtual void releaseMemory();
172 virtual Spiller &spiller() { return *SpillerInstance; }
173 virtual void enqueue(LiveInterval *LI);
174 virtual LiveInterval *dequeue();
175 virtual unsigned selectOrSplit(LiveInterval&,
176 SmallVectorImpl<LiveInterval*>&);
178 /// Perform register allocation.
179 virtual bool runOnMachineFunction(MachineFunction &mf);
184 void LRE_WillEraseInstruction(MachineInstr*);
185 bool LRE_CanEraseVirtReg(unsigned);
186 void LRE_WillShrinkVirtReg(unsigned);
187 void LRE_DidCloneVirtReg(unsigned, unsigned);
189 float calcSpillCost();
190 bool addSplitConstraints(InterferenceCache::Cursor, float&);
191 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
192 void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
193 float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
194 void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
195 SmallVectorImpl<LiveInterval*>&);
196 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
197 SlotIndex getPrevMappedIndex(const MachineInstr*);
198 void calcPrevSlots();
199 unsigned nextSplitPoint(unsigned);
200 CanEvict canEvict(LiveInterval &A, LiveInterval &B);
201 bool canEvictInterference(LiveInterval&, unsigned, float&);
203 unsigned tryAssign(LiveInterval&, AllocationOrder&,
204 SmallVectorImpl<LiveInterval*>&);
205 unsigned tryEvict(LiveInterval&, AllocationOrder&,
206 SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
207 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
208 SmallVectorImpl<LiveInterval*>&);
209 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
210 SmallVectorImpl<LiveInterval*>&);
211 unsigned trySplit(LiveInterval&, AllocationOrder&,
212 SmallVectorImpl<LiveInterval*>&);
214 } // end anonymous namespace
216 char RAGreedy::ID = 0;
219 const char *const RAGreedy::StageName[] = {
229 // Hysteresis to use when comparing floats.
230 // This helps stabilize decisions based on float comparisons.
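// With Hysteresis = 0.98, a new candidate must be about 2% better than the
// incumbent before it is accepted (see tryRegionSplit and tryLocalSplit).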
231 const float Hysteresis = 0.98f;
234 FunctionPass* llvm::createGreedyRegisterAllocator() {
235 return new RAGreedy();
238 RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
239 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
240 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
241 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
243 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
244 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
245 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
246 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
247 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
248 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
249 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
250 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
251 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
252 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
255 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
256 AU.setPreservesCFG();
257 AU.addRequired<AliasAnalysis>();
258 AU.addPreserved<AliasAnalysis>();
259 AU.addRequired<LiveIntervals>();
260 AU.addRequired<SlotIndexes>();
261 AU.addPreserved<SlotIndexes>();
262 AU.addRequired<LiveDebugVariables>();
263 AU.addPreserved<LiveDebugVariables>();
265 AU.addRequiredID(StrongPHIEliminationID);
266 AU.addRequiredTransitive<RegisterCoalescer>();
267 AU.addRequired<CalculateSpillWeights>();
268 AU.addRequired<LiveStacks>();
269 AU.addPreserved<LiveStacks>();
270 AU.addRequired<MachineDominatorTree>();
271 AU.addPreserved<MachineDominatorTree>();
272 AU.addRequired<MachineLoopInfo>();
273 AU.addPreserved<MachineLoopInfo>();
274 AU.addRequired<MachineLoopRanges>();
275 AU.addPreserved<MachineLoopRanges>();
276 AU.addRequired<VirtRegMap>();
277 AU.addPreserved<VirtRegMap>();
278 AU.addRequired<EdgeBundles>();
279 AU.addRequired<SpillPlacement>();
280 MachineFunctionPass::getAnalysisUsage(AU);
284 //===----------------------------------------------------------------------===//
285 // LiveRangeEdit delegate methods
286 //===----------------------------------------------------------------------===//
288 void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
289 // LRE itself will remove the instruction from SlotIndexes and from its parent basic block.
290 VRM->RemoveMachineInstrFromMaps(MI);
293 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
294 if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
295 unassign(LIS->getInterval(VirtReg), PhysReg);
298 // Unassigned virtreg is probably in the priority queue.
299 // RegAllocBase will erase it after dequeueing.
303 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
304 unsigned PhysReg = VRM->getPhys(VirtReg);
308 // Register is assigned, put it back on the queue for reassignment.
309 LiveInterval &LI = LIS->getInterval(VirtReg);
310 unassign(LI, PhysReg);
314 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
315 // LRE may clone a virtual register because dead code elimination causes it to
316 // be split into connected components. Ensure that the new register gets the
317 // same stage as the parent.
319 LRStage[New] = LRStage[Old];
322 void RAGreedy::releaseMemory() {
323 SpillerInstance.reset(0);
326 RegAllocBase::releaseMemory();
329 void RAGreedy::enqueue(LiveInterval *LI) {
330 // Prioritize live ranges by size, assigning larger ranges first.
331 // The queue holds (size, reg) pairs.
332 const unsigned Size = LI->getSize();
333 const unsigned Reg = LI->reg;
334 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
335 "Can only enqueue virtual registers");
339 if (LRStage[Reg] == RS_New)
340 LRStage[Reg] = RS_First;
342 if (LRStage[Reg] == RS_Second)
343 // Unsplit ranges that couldn't be allocated immediately are deferred until
344 // everything else has been allocated. Long ranges are allocated last so
345 // they are split against realistic interference.
346 Prio = (1u << 31) - Size;
348 // Everything else is allocated in long->short order. Long ranges that don't
349 // fit should be spilled ASAP so they don't create interference.
350 Prio = (1u << 31) + Size;
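// Queue is a max-heap on the (Prio, Reg) pair, so all ranges in this group
// (Prio above 2^31) are dequeued before the deferred RS_Second ranges, whose
// Prio is below 2^31.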
352 // Boost ranges that have a physical register hint.
353 if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
357 Queue.push(std::make_pair(Prio, Reg));
360 LiveInterval *RAGreedy::dequeue() {
363 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
369 //===----------------------------------------------------------------------===//
//                              Direct Assignment
371 //===----------------------------------------------------------------------===//
373 /// tryAssign - Try to assign VirtReg to an available register.
374 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
375 AllocationOrder &Order,
376 SmallVectorImpl<LiveInterval*> &NewVRegs) {
379 while ((PhysReg = Order.next()))
380 if (!checkPhysRegInterference(VirtReg, PhysReg))
382 if (!PhysReg || Order.isHint(PhysReg))
385 // PhysReg is available. Try to evict interference from a cheaper alternative.
386 unsigned Cost = TRI->getCostPerUse(PhysReg);
388 // Most registers have 0 additional cost.
392 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
394 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
395 return CheapReg ? CheapReg : PhysReg;
399 //===----------------------------------------------------------------------===//
400 // Interference eviction
401 //===----------------------------------------------------------------------===//
403 /// canEvict - determine if A can evict the assigned live range B. The eviction
404 /// policy defined by this function together with the allocation order defined
405 /// by enqueue() decides which registers ultimately end up being split and spilled.
408 /// This function must define a non-circular relation when it returns CE_Always,
409 /// otherwise infinite eviction loops are possible. When evicting a <= RS_Second
410 /// range, it is possible to return CE_WithSplit which forces the evicted
411 /// register to be split or spilled before it can evict anything again. That
412 /// guarantees progress.
413 RAGreedy::CanEvict RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
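// A strict weight comparison is non-circular by construction, so returning
// CE_Always here cannot create the eviction loops described above.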
414 return A.weight > B.weight ? CE_Always : CE_Never;
417 /// canEvictInterference - Return true if all interferences between VirtReg and PhysReg can be evicted.
419 /// Return false if any interference is heavier than MaxWeight.
420 /// On return, set MaxWeight to the maximal spill weight of an interference.
421 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
424 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
425 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
426 // If there are 10 or more interferences, chances are one is heavier.
427 if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
430 // Check if any interfering live range is heavier than MaxWeight.
431 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
432 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
433 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
435 if (Intf->weight >= MaxWeight)
437 switch (canEvict(VirtReg, *Intf)) {
443 if (getStage(*Intf) > RS_Second)
447 Weight = std::max(Weight, Intf->weight);
454 /// tryEvict - Try to evict all interferences for a physreg.
455 /// @param VirtReg Currently unassigned virtual register.
456 /// @param Order Physregs to try.
457 /// @return Physreg to assign VirtReg, or 0.
458 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
459 AllocationOrder &Order,
460 SmallVectorImpl<LiveInterval*> &NewVRegs,
461 unsigned CostPerUseLimit) {
462 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
464 // Keep track of the lightest single interference seen so far.
465 float BestWeight = HUGE_VALF;
466 unsigned BestPhys = 0;
469 while (unsigned PhysReg = Order.next()) {
470 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
472 // The first use of a register in a function has cost 1.
473 if (CostPerUseLimit == 1 && !MRI->isPhysRegUsed(PhysReg))
476 float Weight = BestWeight;
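// Seeding Weight with BestWeight lets canEvictInterference reject this
// physreg early when any interference is already at least as heavy as the
// best candidate found so far.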
477 if (!canEvictInterference(VirtReg, PhysReg, Weight))
480 // This is an eviction candidate.
481 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
483 if (BestPhys && Weight >= BestWeight)
489 // Stop if the hint can be used.
490 if (Order.isHint(PhysReg))
497 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
498 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
499 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
500 assert(Q.seenAllInterferences() && "Didn't check all interferences.");
501 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
502 LiveInterval *Intf = Q.interferingVRegs()[i];
503 unassign(*Intf, VRM->getPhys(Intf->reg));
505 NewVRegs.push_back(Intf);
506 // Prevent looping by forcing the evicted ranges to be split before they
507 // can evict anything else.
508 if (getStage(*Intf) < RS_Second &&
509 canEvict(VirtReg, *Intf) == CE_WithSplit)
510 LRStage[Intf->reg] = RS_Second;
517 //===----------------------------------------------------------------------===//
//                              Region Splitting
519 //===----------------------------------------------------------------------===//
521 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
522 /// interference pattern in Physreg and its aliases. Add the constraints to
523 /// SpillPlacement and return the static cost of this split in Cost, assuming
524 /// that all preferences in SplitConstraints are met.
525 /// Return false if there are no bundles with positive bias.
526 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
528 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
530 // Reset interference dependent info.
531 SplitConstraints.resize(UseBlocks.size());
532 float StaticCost = 0;
533 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
534 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
535 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
537 BC.Number = BI.MBB->getNumber();
538 Intf.moveToBlock(BC.Number);
539 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
540 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
542 if (!Intf.hasInterference())
545 // Number of spill code instructions to insert.
548 // Interference for the live-in value.
550 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
551 BC.Entry = SpillPlacement::MustSpill, ++Ins;
552 else if (Intf.first() < BI.FirstUse)
553 BC.Entry = SpillPlacement::PrefSpill, ++Ins;
554 else if (Intf.first() < BI.LastUse)
558 // Interference for the live-out value.
560 if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
561 BC.Exit = SpillPlacement::MustSpill, ++Ins;
562 else if (Intf.last() > BI.LastUse)
563 BC.Exit = SpillPlacement::PrefSpill, ++Ins;
564 else if (Intf.last() > BI.FirstUse)
568 // Accumulate the total frequency of inserted spill code.
570 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
574 // Add constraints for use-blocks. Note that these are the only constraints
575 // that may add a positive bias; it is downhill from here.
576 SpillPlacer->addConstraints(SplitConstraints);
577 return SpillPlacer->scanActiveBundles();
581 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
582 /// live-through blocks in Blocks.
583 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
584 ArrayRef<unsigned> Blocks) {
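// Constraints and links are buffered in fixed-size arrays and flushed to
// SpillPlacer each time a group of GroupSize entries fills up, with a final
// flush below for any remainder.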
585 const unsigned GroupSize = 8;
586 SpillPlacement::BlockConstraint BCS[GroupSize];
587 unsigned TBS[GroupSize];
588 unsigned B = 0, T = 0;
590 for (unsigned i = 0; i != Blocks.size(); ++i) {
591 unsigned Number = Blocks[i];
592 Intf.moveToBlock(Number);
594 if (!Intf.hasInterference()) {
595 assert(T < GroupSize && "Array overflow");
597 if (++T == GroupSize) {
598 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
604 assert(B < GroupSize && "Array overflow");
605 BCS[B].Number = Number;
607 // Interference for the live-in value.
608 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
609 BCS[B].Entry = SpillPlacement::MustSpill;
611 BCS[B].Entry = SpillPlacement::PrefSpill;
613 // Interference for the live-out value.
614 if (Intf.last() >= SA->getLastSplitPoint(Number))
615 BCS[B].Exit = SpillPlacement::MustSpill;
617 BCS[B].Exit = SpillPlacement::PrefSpill;
619 if (++B == GroupSize) {
620 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
621 SpillPlacer->addConstraints(Array);
626 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
627 SpillPlacer->addConstraints(Array);
628 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
631 void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
632 InterferenceCache::Cursor Intf) {
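// Alternate between adding newly reachable through blocks (from the
// periphery of the positive bundles) and rerunning SpillPlacer until no new
// positive bundles appear.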
633 // Keep track of through blocks that have not been added to SpillPlacer.
634 BitVector Todo = SA->getThroughBlocks();
635 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
636 unsigned AddedTo = 0;
638 unsigned Visited = 0;
642 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
643 if (NewBundles.empty())
645 // Find new through blocks in the periphery of PrefRegBundles.
646 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
647 unsigned Bundle = NewBundles[i];
648 // Look at all blocks connected to Bundle in the full graph.
649 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
650 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
653 if (!Todo.test(Block))
656 // This is a new through block. Add it to SpillPlacer later.
657 ActiveBlocks.push_back(Block);
663 // Any new blocks to add?
664 if (ActiveBlocks.size() > AddedTo) {
665 ArrayRef<unsigned> Add(&ActiveBlocks[AddedTo],
666 ActiveBlocks.size() - AddedTo);
667 addThroughConstraints(Intf, Add);
668 AddedTo = ActiveBlocks.size();
670 // Perhaps iterating can enable more bundles?
671 SpillPlacer->iterate();
673 DEBUG(dbgs() << ", v=" << Visited);
676 /// calcSpillCost - Compute how expensive it would be to split the live range in
677 /// SA around all use blocks instead of forming bundle regions.
678 float RAGreedy::calcSpillCost() {
680 const LiveInterval &LI = SA->getParent();
681 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
682 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
683 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
684 unsigned Number = BI.MBB->getNumber();
685 // We normally only need one spill instruction - a load or a store.
686 Cost += SpillPlacer->getBlockFrequency(Number);
688 // Unless the value is redefined in the block.
689 if (BI.LiveIn && BI.LiveOut) {
690 SlotIndex Start, Stop;
691 tie(Start, Stop) = Indexes->getMBBRange(Number);
692 LiveInterval::const_iterator I = LI.find(Start);
693 assert(I != LI.end() && "Expected live-in value");
694 // Is there a different live-out value? If so, we need an extra spill instruction.
697 Cost += SpillPlacer->getBlockFrequency(Number);
703 /// calcGlobalSplitCost - Return the global split cost of following the split
704 /// pattern in LiveBundles. This cost should be added to the local cost of the
705 /// interference pattern in SplitConstraints.
707 float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
708 InterferenceCache::Cursor Intf) {
709 float GlobalCost = 0;
710 const BitVector &LiveBundles = Cand.LiveBundles;
711 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
712 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
713 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
714 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
715 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
716 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
720 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
722 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
724 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
727 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
728 unsigned Number = Cand.ActiveBlocks[i];
729 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
730 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
731 if (!RegIn && !RegOut)
733 if (RegIn && RegOut) {
734 // We need double spill code if this block has interference.
735 Intf.moveToBlock(Number);
736 if (Intf.hasInterference())
737 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
740 // live-in / stack-out or stack-in live-out.
741 GlobalCost += SpillPlacer->getBlockFrequency(Number);
746 /// splitAroundRegion - Split VirtReg around the region determined by
747 /// LiveBundles. Make an effort to avoid interference from PhysReg.
749 /// The 'register' interval is going to contain as many uses as possible while
750 /// avoiding interference. The 'stack' interval is the complement constructed by
751 /// SplitEditor. It will contain the rest.
753 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
754 GlobalSplitCandidate &Cand,
755 SmallVectorImpl<LiveInterval*> &NewVRegs) {
756 const BitVector &LiveBundles = Cand.LiveBundles;
759 dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
761 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
762 dbgs() << " EB#" << i;
766 InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
767 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
770 // Create the main cross-block interval.
771 const unsigned MainIntv = SE->openIntv();
773 // First add all defs that are live out of a block.
774 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
775 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
776 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
777 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
778 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
780 // Create separate intervals for isolated blocks with multiple uses.
781 if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) {
782 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
783 SE->splitSingleBlock(BI);
784 SE->selectIntv(MainIntv);
788 // Should the register be live out?
789 if (!BI.LiveOut || !RegOut)
792 SlotIndex Start, Stop;
793 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
794 Intf.moveToBlock(BI.MBB->getNumber());
795 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
796 << Bundles->getBundle(BI.MBB->getNumber(), 1)
797 << " [" << Start << ';'
798 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
799 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');
801 // The interference interval should either be invalid or overlap MBB.
802 assert((!Intf.hasInterference() || Intf.first() < Stop)
803 && "Bad interference");
804 assert((!Intf.hasInterference() || Intf.last() > Start)
805 && "Bad interference");
807 // Check interference leaving the block.
808 if (!Intf.hasInterference()) {
809 // Block is interference-free.
810 DEBUG(dbgs() << ", no interference");
811 if (!BI.LiveThrough) {
812 DEBUG(dbgs() << ", not live-through.\n");
813 SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
817 // Block is live-through, but entry bundle is on the stack.
818 // Reload just before the first use.
819 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
820 SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
823 DEBUG(dbgs() << ", live-through.\n");
827 // Block has interference.
828 DEBUG(dbgs() << ", interference to " << Intf.last());
830 if (!BI.LiveThrough && Intf.last() <= BI.FirstUse) {
831 // The interference doesn't reach the outgoing segment.
832 DEBUG(dbgs() << " doesn't affect def from " << BI.FirstUse << '\n');
833 SE->useIntv(BI.FirstUse, Stop);
837 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
838 if (Intf.last().getBoundaryIndex() < BI.LastUse) {
839 // There are interference-free uses at the end of the block.
840 // Find the first use that can get the live-out register.
841 SmallVectorImpl<SlotIndex>::const_iterator UI =
842 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
843 Intf.last().getBoundaryIndex());
844 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
846 assert(Use <= BI.LastUse && "Couldn't find last use");
847 // Only attempt a split before the last split point.
848 if (Use.getBaseIndex() <= LastSplitPoint) {
849 DEBUG(dbgs() << ", free use at " << Use << ".\n");
850 SlotIndex SegStart = SE->enterIntvBefore(Use);
851 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
852 assert(SegStart < LastSplitPoint && "Impossible split point");
853 SE->useIntv(SegStart, Stop);
858 // Interference is after the last use.
859 DEBUG(dbgs() << " after last use.\n");
860 SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
861 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
864 // Now all defs leading to live bundles are handled, do everything else.
865 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
866 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
867 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
868 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
870 // Is the register live-in?
871 if (!BI.LiveIn || !RegIn)
874 // We have an incoming register. Check for interference.
875 SlotIndex Start, Stop;
876 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
877 Intf.moveToBlock(BI.MBB->getNumber());
878 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
879 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
880 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
883 // Check interference entering the block.
884 if (!Intf.hasInterference()) {
885 // Block is interference-free.
886 DEBUG(dbgs() << ", no interference");
887 if (!BI.LiveThrough) {
888 DEBUG(dbgs() << ", killed in block.\n");
889 SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
893 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
894 // Block is live-through, but exit bundle is on the stack.
895 // Spill immediately after the last use.
896 if (BI.LastUse < LastSplitPoint) {
897 DEBUG(dbgs() << ", uses, stack-out.\n");
898 SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
901 // The last use is after the last split point, it is probably an
903 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
904 << LastSplitPoint << ", stack-out.\n");
905 SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
906 SE->useIntv(Start, SegEnd);
907 // Run a double interval from the split to the last use.
908 // This makes it possible to spill the complement without affecting the
910 SE->overlapIntv(SegEnd, BI.LastUse);
913 // Register is live-through.
914 DEBUG(dbgs() << ", uses, live-through.\n");
915 SE->useIntv(Start, Stop);
919 // Block has interference.
920 DEBUG(dbgs() << ", interference from " << Intf.first());
922 if (!BI.LiveThrough && Intf.first() >= BI.LastUse) {
923 // The interference doesn't reach the outgoing segment.
924 DEBUG(dbgs() << " doesn't affect kill at " << BI.LastUse << '\n');
925 SE->useIntv(Start, BI.LastUse);
929 if (Intf.first().getBaseIndex() > BI.FirstUse) {
930 // There are interference-free uses at the beginning of the block.
931 // Find the last use that can get the register.
932 SmallVectorImpl<SlotIndex>::const_iterator UI =
933 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
934 Intf.first().getBaseIndex());
935 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
936 SlotIndex Use = (--UI)->getBoundaryIndex();
937 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
938 SlotIndex SegEnd = SE->leaveIntvAfter(Use);
939 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
940 SE->useIntv(Start, SegEnd);
944 // Interference is before the first use.
945 DEBUG(dbgs() << " before first use.\n");
946 SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
947 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
950 // Handle live-through blocks.
951 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
952 unsigned Number = Cand.ActiveBlocks[i];
953 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
954 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
955 DEBUG(dbgs() << "Live through BB#" << Number << '\n');
956 if (RegIn && RegOut) {
957 Intf.moveToBlock(Number);
958 if (!Intf.hasInterference()) {
959 SE->useIntv(Indexes->getMBBStartIdx(Number),
960 Indexes->getMBBEndIdx(Number));
964 MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
966 SE->leaveIntvAtTop(*MBB);
968 SE->enterIntvAtEnd(*MBB);
973 SmallVector<unsigned, 8> IntvMap;
974 SE->finish(&IntvMap);
975 DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
977 LRStage.resize(MRI->getNumVirtRegs());
978 unsigned OrigBlocks = SA->getNumLiveBlocks();
980 // Sort out the new intervals created by splitting. We get four kinds:
981 // - Remainder intervals should not be split again.
982 // - Candidate intervals can be assigned to Cand.PhysReg.
983 // - Block-local splits are candidates for local splitting.
984 // - DCE leftovers should go back on the queue.
985 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
986 unsigned Reg = LREdit.get(i)->reg;
988 // Ignore old intervals from DCE.
989 if (LRStage[Reg] != RS_New)
992 // Remainder interval. Don't try splitting again; spill if it doesn't allocate.
994 if (IntvMap[i] == 0) {
995 LRStage[Reg] = RS_Global;
999 // Main interval. Allow repeated splitting as long as the number of live
1000 // blocks is strictly decreasing.
1001 if (IntvMap[i] == MainIntv) {
1002 if (SA->countLiveBlocks(LREdit.get(i)) >= OrigBlocks) {
1003 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1004 << " blocks as original.\n");
1005 // Don't allow repeated splitting as a safeguard against looping.
1006 LRStage[Reg] = RS_Global;
1011 // Other intervals are treated as new. This includes local intervals created
1012 // for blocks with multiple uses, and anything created by DCE.
1016 MF->verify(this, "After splitting live range around region");
1019 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1020 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1021 float BestCost = Hysteresis * calcSpillCost();
1022 DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
1023 const unsigned NoCand = ~0u;
1024 unsigned BestCand = NoCand;
1027 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
1028 if (GlobalCand.size() <= Cand)
1029 GlobalCand.resize(Cand+1);
1030 GlobalCand[Cand].reset(PhysReg);
1032 SpillPlacer->prepare(GlobalCand[Cand].LiveBundles);
1034 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
1035 if (!addSplitConstraints(Intf, Cost)) {
1036 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
1039 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
1040 if (Cost >= BestCost) {
1042 if (BestCand == NoCand)
1043 dbgs() << " worse than no bundles\n";
1045 dbgs() << " worse than "
1046 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1050 growRegion(GlobalCand[Cand], Intf);
1052 SpillPlacer->finish();
1054 // No live bundles, defer to splitSingleBlocks().
1055 if (!GlobalCand[Cand].LiveBundles.any()) {
1056 DEBUG(dbgs() << " no bundles.\n");
1060 Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf);
1062 dbgs() << ", total = " << Cost << " with bundles";
1063 for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0;
1064 i = GlobalCand[Cand].LiveBundles.find_next(i))
1065 dbgs() << " EB#" << i;
1068 if (Cost < BestCost) {
1070 BestCost = Hysteresis * Cost; // Prevent rounding effects.
1074 if (BestCand == NoCand)
1077 splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
1082 //===----------------------------------------------------------------------===//
//                               Local Splitting
1084 //===----------------------------------------------------------------------===//
1087 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1088 /// in order to use PhysReg between two entries in SA->UseSlots.
1090 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
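/// For example, with three uses at slots A < B < C there are two gaps:
/// GapWeight[0] covers [A;B) and GapWeight[1] covers [B;C).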
1092 void RAGreedy::calcGapWeights(unsigned PhysReg,
1093 SmallVectorImpl<float> &GapWeight) {
1094 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1095 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1096 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1097 const unsigned NumGaps = Uses.size()-1;
1099 // Start and end points for the interference check.
1100 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
1101 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
1103 GapWeight.assign(NumGaps, 0.0f);
1105 // Add interference from each overlapping register.
1106 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
1107 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
1108 .checkInterference())
1111 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
1112 // so we don't need InterferenceQuery.
1114 // Interference that overlaps an instruction is counted in both gaps
1115 // surrounding the instruction. The exception is interference before
1116 // StartIdx and after StopIdx.
1118 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
1119 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1120 // Skip the gaps before IntI.
1121 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1122 if (++Gap == NumGaps)
1127 // Update the gaps covered by IntI.
1128 const float weight = IntI.value()->weight;
1129 for (; Gap != NumGaps; ++Gap) {
1130 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1131 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1140 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
1141 /// before MI that has a slot index. If MI is the first mapped instruction in
1142 /// its block, return the block start index instead.
1144 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
1145 assert(MI && "Missing MachineInstr");
1146 const MachineBasicBlock *MBB = MI->getParent();
1147 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
1149 if (!(--I)->isDebugValue() && !I->isCopy())
1150 return Indexes->getInstructionIndex(I);
1151 return Indexes->getMBBStartIdx(MBB);
1154 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
1155 /// real non-copy instruction for each instruction in SA->UseSlots.
1157 void RAGreedy::calcPrevSlots() {
1158 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1160 PrevSlot.reserve(Uses.size());
1161 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
1162 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
1163 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
1167 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
1168 /// be beneficial to split before UseSlots[i].
1170 /// 0 is always a valid split point
1171 unsigned RAGreedy::nextSplitPoint(unsigned i) {
1172 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1173 const unsigned Size = Uses.size();
1174 assert(i != Size && "No split points after the end");
1175 // Allow split before i when Uses[i] is not adjacent to the previous use.
1176 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
1181 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1184 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1185 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1186 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1187 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1189 // Note that it is possible to have an interval that is live-in or live-out
1190 // while only covering a single block - A phi-def can use undef values from
1191 // predecessors, and the block could be a single-block loop.
1192 // We don't bother doing anything clever about such a case, we simply assume
1193 // that the interval is continuous from FirstUse to LastUse. We should make
1194 // sure that we don't do anything illegal to such an interval, though.
1196 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1197 if (Uses.size() <= 2)
1199 const unsigned NumGaps = Uses.size()-1;
1202 dbgs() << "tryLocalSplit: ";
1203 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1204 dbgs() << ' ' << SA->UseSlots[i];
1208 // For every use, find the previous mapped non-copy instruction.
1210 // We use this to detect valid split points, and to estimate new interval sizes.
1213 unsigned BestBefore = NumGaps;
1214 unsigned BestAfter = 0;
1217 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
1218 SmallVector<float, 8> GapWeight;
1221 while (unsigned PhysReg = Order.next()) {
1222 // Keep track of the largest spill weight that would need to be evicted in
1223 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1224 calcGapWeights(PhysReg, GapWeight);
1226 // Try to find the best sequence of gaps to close.
1227 // The new spill weight must be larger than any gap interference.
1229 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1230 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1232 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1233 // It is the spill weight that needs to be evicted.
1234 float MaxGap = GapWeight[0];
1235 for (unsigned i = 1; i != SplitAfter; ++i)
1236 MaxGap = std::max(MaxGap, GapWeight[i]);
1239 // Live before/after split?
1240 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1241 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1243 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1244 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1245 << " i=" << MaxGap);
1247 // Stop before the interval gets so big we wouldn't be making progress.
1248 if (!LiveBefore && !LiveAfter) {
1249 DEBUG(dbgs() << " all\n");
1252 // Should the interval be extended or shrunk?
1254 if (MaxGap < HUGE_VALF) {
1255 // Estimate the new spill weight.
1257 // Each instruction reads and writes the register, except the first
1258 // instr doesn't read when !LiveBefore, and the last instr doesn't write when !LiveAfter.
1261 // We will be inserting copies before and after, so the total number of
1262 // reads and writes is 2 * EstUses.
1264 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1265 2*(LiveBefore + LiveAfter);
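// For example, covering two gaps with the register live both before and after
// the split gives EstUses = 2*2 + 2*2 = 8.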
1267 // Try to guess the size of the new interval. This should be trivial,
1268 // but the slot index of an inserted copy can be a lot smaller than the
1269 // instruction it is inserted before if there are many dead indexes between them.
1272 // We measure the distance from the instruction before SplitBefore to
1273 // get a conservative estimate.
1275 // The final distance can still be different if inserting copies
1276 // triggers a slot index renumbering.
1278 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1279 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1280 // Would this split be possible to allocate?
1281 // Never allocate all gaps, we wouldn't be making progress.
1282 DEBUG(dbgs() << " w=" << EstWeight);
1283 if (EstWeight * Hysteresis >= MaxGap) {
1285 float Diff = EstWeight - MaxGap;
1286 if (Diff > BestDiff) {
1287 DEBUG(dbgs() << " (best)");
1288 BestDiff = Hysteresis * Diff;
1289 BestBefore = SplitBefore;
1290 BestAfter = SplitAfter;
1297 SplitBefore = nextSplitPoint(SplitBefore);
1298 if (SplitBefore < SplitAfter) {
1299 DEBUG(dbgs() << " shrink\n");
1300 // Recompute the max when necessary.
1301 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1302 MaxGap = GapWeight[SplitBefore];
1303 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1304 MaxGap = std::max(MaxGap, GapWeight[i]);
1311 // Try to extend the interval.
1312 if (SplitAfter >= NumGaps) {
1313 DEBUG(dbgs() << " end\n");
1317 DEBUG(dbgs() << " extend\n");
1318 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1319 SplitAfter != e; ++SplitAfter)
1320 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1325 // Didn't find any candidates?
1326 if (BestBefore == NumGaps)
1329 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1330 << '-' << Uses[BestAfter] << ", " << BestDiff
1331 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1333 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1337 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1338 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1339 SE->useIntv(SegStart, SegStop);
1341 DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
1342 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
1348 //===----------------------------------------------------------------------===//
1349 // Live Range Splitting
1350 //===----------------------------------------------------------------------===//
1352 /// trySplit - Try to split VirtReg or one of its interferences, making it assignable.
1354 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1355 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1356 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1357 // Local intervals are handled separately.
1358 if (LIS->intervalIsInOneMBB(VirtReg)) {
1359 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1360 SA->analyze(&VirtReg);
1361 return tryLocalSplit(VirtReg, Order, NewVRegs);
1364 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1366 // Don't iterate global splitting.
1367 // Move straight to spilling if this range was produced by a global split.
1368 if (getStage(VirtReg) >= RS_Global)
1371 SA->analyze(&VirtReg);
1373 // FIXME: SplitAnalysis may repair broken live ranges coming from the
1374 // coalescer. That may cause the range to become allocatable which means that
1375 // tryRegionSplit won't be making progress. This check should be replaced with
1376 // an assertion when the coalescer is fixed.
1377 if (SA->didRepairRange()) {
1378 // VirtReg has changed, so all cached queries are invalid.
1379 invalidateVirtRegs();
1380 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1384 // First try to split around a region spanning multiple blocks.
1385 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1386 if (PhysReg || !NewVRegs.empty())
1389 // Then isolate blocks with multiple uses.
1390 SplitAnalysis::BlockPtrSet Blocks;
1391 if (SA->getMultiUseBlocks(Blocks)) {
1392 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1394 SE->splitSingleBlocks(Blocks);
1395 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
1397 MF->verify(this, "After splitting live range around basic blocks");
1400 // Don't assign any physregs.
1405 //===----------------------------------------------------------------------===//
//                            Main Entry Point
1407 //===----------------------------------------------------------------------===//
1409 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1410 SmallVectorImpl<LiveInterval*> &NewVRegs) {
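// Allocation strategy, in order: assignment to a free register, eviction of
// lighter interference, deferral to the second round, splitting, and finally
// spilling.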
1411 // First try assigning a free register.
1412 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
1413 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1416 LiveRangeStage Stage = getStage(VirtReg);
1417 DEBUG(dbgs() << StageName[Stage] << '\n');
1419 // Try to evict a less worthy live range, but only for ranges from the primary
1420 // queue. The RS_Second ranges already failed to do this, and they should not
1421 // get a second chance until they have been split.
1422 if (Stage != RS_Second)
1423 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1426 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1428 // The first time we see a live range, don't try to split or spill.
1429 // Wait until the second time, when all smaller ranges have been allocated.
1430 // This gives a better picture of the interference to split around.
1431 if (Stage == RS_First) {
1432 LRStage[VirtReg.reg] = RS_Second;
1433 DEBUG(dbgs() << "wait for second round\n");
1434 NewVRegs.push_back(&VirtReg);
1438 // If we couldn't allocate a register from spilling, there is probably some
1439 // invalid inline assembly. The base class will report it.
1440 if (Stage >= RS_Spill)
1443 // Try splitting VirtReg or interferences.
1444 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1445 if (PhysReg || !NewVRegs.empty())
1448 // Finally spill VirtReg itself.
1449 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1450 LiveRangeEdit LRE(VirtReg, NewVRegs, this);
1451 spiller().spill(LRE);
1452 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
1455 MF->verify(this, "After spilling");
1457 // The live virtual register requesting allocation was spilled, so tell
1458 // the caller not to allocate anything during this round.
1462 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1463 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1464 << "********** Function: "
1465 << ((Value*)mf.getFunction())->getName() << '\n');
1469 MF->verify(this, "Before greedy register allocator");
1471 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1472 Indexes = &getAnalysis<SlotIndexes>();
1473 DomTree = &getAnalysis<MachineDominatorTree>();
1474 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1475 Loops = &getAnalysis<MachineLoopInfo>();
1476 LoopRanges = &getAnalysis<MachineLoopRanges>();
1477 Bundles = &getAnalysis<EdgeBundles>();
1478 SpillPlacer = &getAnalysis<SpillPlacement>();
1479 DebugVars = &getAnalysis<LiveDebugVariables>();
1481 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1482 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
1484 LRStage.resize(MRI->getNumVirtRegs());
1485 IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
1489 LIS->addKillFlags();
1493 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1494 VRM->rewrite(Indexes);
1497 // Write out new DBG_VALUE instructions.
1498 DebugVars->emitDebugValues(VRM);
1500 // The pass output is in VirtRegMap. Release all the transient data.