//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    RS_New,    ///< Never seen before.
    RS_First,  ///< First time in the queue.
    RS_Second, ///< Second time in the queue.
    RS_Global, ///< Produced by global splitting.
    RS_Local,  ///< Produced by local splitting.
    RS_Spill   ///< Produced by spilling.
  };

  static const char *const StageName[];
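
  // For example, a virtual register first enters the queue as RS_First. If it
  // cannot be assigned or evict any interference, selectOrSplit defers it and
  // requeues it as RS_Second, and only RS_Second ranges are considered for
  // splitting and spilling. Ranges created by the splitters and the spiller
  // start at RS_Global, RS_Local, or RS_Spill and never move to an earlier
  // stage, which is what bounds the total amount of splitting.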

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
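
  // The comparison above is lexicographic: a cost that breaks fewer hints is
  // always cheaper, and only among costs breaking the same number of hints
  // does the largest evicted spill weight decide. For example, a cost of
  // {BrokenHints = 0, MaxWeight = 5.0} is cheaper than one of
  // {BrokenHints = 1, MaxWeight = 0.5}.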

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    InterferenceCache::Cursor Intf;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
  "RS_New",
  "RS_First",
  "RS_Second",
  "RS_Global",
  "RS_Local",
  "RS_Spill"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
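
// For example, when a candidate's cost beats the current best, the best is
// recorded as Hysteresis * Cost (see tryRegionSplit and tryLocalSplit), so a
// later candidate must be roughly 2% better before it can displace an earlier
// decision. This keeps tiny floating-point differences from flipping choices.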

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. Ensure that the new register gets
  // the same stage as the parent.
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_First;

  if (ExtraRegInfo[Reg].Stage == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}
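
// To illustrate the priority scheme: a virtual register covering 200 slots is
// first pushed with Prio = (1u << 31) + 200; when selectOrSplit defers it to
// the second round it is requeued as RS_Second with Prio = (1u << 31) - 200.
// The priority queue pops the largest (Prio, Reg) pair first, so long fresh
// ranges are allocated early and deferred ranges come out last, when the
// interference picture is realistic.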

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}
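
// A note on getCostPerUse: most physical registers report an extra cost of 0,
// but some targets mark a few registers as slightly more expensive to use, for
// example because they need longer instruction encodings. When the only free
// register is one of these, tryAssign is willing to evict interference from a
// cheaper register instead, provided the eviction itself is cheap.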

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) <= RS_Second;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}
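
// In other words, when A is heading for its preferred register and evicting B
// would not break B's own hint, the eviction is allowed even if B is heavier,
// as long as B can still be split later. In every other case a strict weight
// comparison decides: an A with weight 4.0 may evict a B with weight 3.0, but
// never the other way around.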

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Spill)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}
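
// The cascade mechanism used above works like this: if a live range with
// cascade number 3 evicts another range, the evictee is stamped with cascade 3
// as well, and from then on it can only be evicted (urgent evictions aside) by
// a range carrying a strictly newer cascade number. Every fresh eviction takes
// a new number from NextCascade, so chains of evictions always move forward
// and can never form a loop.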

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastUse)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstUse)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
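
// To make the classification above concrete: when a use block is entered with
// the value live-in, interference that already covers the block entry forces
// MustSpill, interference that starts before the first use in the block makes
// the entry PrefSpill, and interference that only appears between uses merely
// adds to the count of spill instructions without changing the preference. The
// live-out side is handled symmetrically around the last use and the last
// split point.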

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      SpillPlacer->addPrefSpill(NewBlocks);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions.  Compact
/// regions can be computed without looking at interference.  They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
      if (I->end < Stop)
        Cost += SpillPlacer->getBlockFrequency(Number);
    }
  }
  return Cost;
}
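
// Intuitively, every use block contributes one reload or store at its block
// frequency, and a block where the value is both live-in and live-out but gets
// redefined inside pays for a second spill instruction. tryRegionSplit uses
// this number, scaled by Hysteresis, as the cost that any region split
// candidate has to beat.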

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
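
// For example, a use block whose entry bundle disagrees with the preference
// computed in addSplitConstraints (in a register when the constraint wanted a
// spill, or the other way around) pays one block frequency per mismatched
// edge, and a live-through block that keeps the value in a register across
// interference pays twice its frequency for the save and restore around it.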

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor &Intf = Cand.Intf;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = BI.LiveIn &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = BI.LiveOut &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (!BI.isOneInstr()) {
        SE->splitSingleBlock(BI);
        SE->selectIntv(MainIntv);
      }
      continue;
    }

    Intf.moveToBlock(BI.MBB->getNumber());

    if (RegIn && RegOut)
      SE->splitLiveThroughBlock(BI.MBB->getNumber(),
                                MainIntv, Intf.first(),
                                MainIntv, Intf.last());
    else if (RegIn)
      SE->splitRegInBlock(BI, MainIntv, Intf.first());
    else
      SE->splitRegOutBlock(BI, MainIntv, Intf.last());
  }

  // Handle live-through blocks.
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    Intf.moveToBlock(Number);
    SE->splitLiveThroughBlock(Number, RegIn  ? MainIntv : 0, Intf.first(),
                                      RegOut ? MainIntv : 0, Intf.last());
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Global);
      continue;
    }

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Global);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;
  unsigned NumCands = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  if (BestCand == NoCand)
    return 0;

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Local. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() >= RS_Local. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Local,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
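  // Put differently, the 3 -> 2+3 split mentioned above is allowed the first
  // time because the original range is still below RS_Local (rule 1). The new
  // piece that again has 3 instructions is tagged RS_Local (rule 3), so if it
  // comes back it must genuinely shrink (rule 2), and repeated non-progress
  // splits are impossible.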
  bool ProgressRequired = getStage(VirtReg) >= RS_Local;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Local so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Local);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)
    return 0;

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Second ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    setStage(VirtReg, RS_Second);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.