1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "LiveIntervalUnion.h"
18 #include "LiveRangeEdit.h"
19 #include "RegAllocBase.h"
21 #include "SpillPlacement.h"
23 #include "VirtRegMap.h"
24 #include "VirtRegRewriter.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Function.h"
27 #include "llvm/PassAnalysisSupport.h"
28 #include "llvm/CodeGen/CalcSpillWeights.h"
29 #include "llvm/CodeGen/EdgeBundles.h"
30 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
31 #include "llvm/CodeGen/LiveStackAnalysis.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineLoopInfo.h"
35 #include "llvm/CodeGen/MachineLoopRanges.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/RegAllocRegistry.h"
39 #include "llvm/CodeGen/RegisterCoalescer.h"
40 #include "llvm/Target/TargetOptions.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Support/Timer.h"
// Register this allocator with the -regalloc command line option registry so
// "-regalloc=greedy" selects it; the factory below constructs the pass.
48 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
49 createGreedyRegisterAllocator);
/// RAGreedy - A global register allocator that picks live ranges by priority
/// and, when no physreg is free, tries reassignment, eviction, live range
/// splitting, and finally spilling (see selectOrSplit below).
/// NOTE(review): this listing has elided lines (non-contiguous embedded
/// numbering), so access specifiers and some members are not visible here.
52 class RAGreedy : public MachineFunctionPass, public RegAllocBase {
// Registers the target reserves; never available for allocation.
55 BitVector ReservedRegs;
// Cached analyses, (re)fetched in runOnMachineFunction.
60 MachineDominatorTree *DomTree;
61 MachineLoopInfo *Loops;
62 MachineLoopRanges *LoopRanges;
64 SpillPlacement *SpillPlacer;
// Owned helpers; auto_ptr matches the file's pre-C++11 vintage.
67 std::auto_ptr<Spiller> SpillerInstance;
68 std::auto_ptr<SplitAnalysis> SA;
72 /// All basic blocks where the current register is live.
73 SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;
78 /// Return the pass name.
79 virtual const char* getPassName() const {
80 return "Greedy Register Allocator";
83 /// RAGreedy analysis usage.
84 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
86 virtual void releaseMemory();
88 virtual Spiller &spiller() { return *SpillerInstance; }
90 virtual float getPriority(LiveInterval *LI);
92 virtual unsigned selectOrSplit(LiveInterval&,
93 SmallVectorImpl<LiveInterval*>&);
95 /// Perform register allocation.
96 virtual bool runOnMachineFunction(MachineFunction &mf);
// Internal helpers, defined below.
101 bool checkUncachedInterference(LiveInterval&, unsigned);
102 LiveInterval *getSingleInterference(LiveInterval&, unsigned);
103 bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
104 float calcInterferenceWeight(LiveInterval&, unsigned);
105 float calcInterferenceInfo(LiveInterval&, unsigned);
106 float calcGlobalSplitCost(const BitVector&);
107 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
108 SmallVectorImpl<LiveInterval*>&);
// The try* methods implement successive allocation strategies; each returns
// a physreg to assign, or 0 to fall through to the next strategy.
110 unsigned tryReassignOrEvict(LiveInterval&, AllocationOrder&,
111 SmallVectorImpl<LiveInterval*>&);
112 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
113 SmallVectorImpl<LiveInterval*>&);
114 unsigned trySplit(LiveInterval&, AllocationOrder&,
115 SmallVectorImpl<LiveInterval*>&);
116 unsigned trySpillInterferences(LiveInterval&, AllocationOrder&,
117 SmallVectorImpl<LiveInterval*>&);
119 } // end anonymous namespace
// Pass identification token; its address (not value) uniquely identifies
// the pass to the PassManager.
121 char RAGreedy::ID = 0;
// Factory used by the RegisterRegAlloc registry entry above; callers own the
// returned pass.
123 FunctionPass* llvm::createGreedyRegisterAllocator() {
124 return new RAGreedy();
127 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
128 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
129 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
130 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
131 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
132 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
133 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
134 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
135 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
136 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
137 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
138 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
139 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
140 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
// Declare every analysis required or preserved by this pass. Requirements
// here mirror the initialize* calls in the constructor.
143 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
144 AU.setPreservesCFG();
145 AU.addRequired<AliasAnalysis>();
146 AU.addPreserved<AliasAnalysis>();
147 AU.addRequired<LiveIntervals>();
148 AU.addRequired<SlotIndexes>();
149 AU.addPreserved<SlotIndexes>();
151 AU.addRequiredID(StrongPHIEliminationID);
// Transitive: coalescer results must stay alive while this pass runs.
152 AU.addRequiredTransitive<RegisterCoalescer>();
153 AU.addRequired<CalculateSpillWeights>();
154 AU.addRequired<LiveStacks>();
155 AU.addPreserved<LiveStacks>();
156 AU.addRequired<MachineDominatorTree>();
157 AU.addPreserved<MachineDominatorTree>();
158 AU.addRequired<MachineLoopInfo>();
159 AU.addPreserved<MachineLoopInfo>();
160 AU.addRequired<MachineLoopRanges>();
161 AU.addPreserved<MachineLoopRanges>();
162 AU.addRequired<VirtRegMap>();
163 AU.addPreserved<VirtRegMap>();
164 AU.addRequired<EdgeBundles>();
165 AU.addRequired<SpillPlacement>();
// Let the base class add its own requirements.
166 MachineFunctionPass::getAnalysisUsage(AU);
// Drop per-function state: destroy the spiller, then let the base class
// release its own data structures.
169 void RAGreedy::releaseMemory() {
170 SpillerInstance.reset(0);
171 RegAllocBase::releaseMemory();
// Compute the allocation priority of LI; higher-priority intervals are
// allocated first. Starts from the interval's spill weight.
174 float RAGreedy::getPriority(LiveInterval *LI) {
175 float Priority = LI->weight;
177 // Prioritize hinted registers so they are allocated first.
// NOTE(review): as shown, Hint is default-constructed and never assigned, so
// both members are zero and the branch below can never fire. Presumably it
// should be initialized from the register allocation hint (lines appear
// elided in this listing) — TODO confirm against the original source.
178 std::pair<unsigned, unsigned> Hint;
179 if (Hint.first || Hint.second) {
180 // The hint can be target specific, a virtual register, or a physreg.
183 // Prefer physreg hints above anything else.
184 if (Hint.first == 0 && TargetRegisterInfo::isPhysicalRegister(Hint.second))
191 //===----------------------------------------------------------------------===//
192 // Register Reassignment
193 //===----------------------------------------------------------------------===//
195 // Check interference without using the cache.
// Builds a fresh Query against the live-union of PhysReg and each of its
// overlapping aliases; used when cached query results may be stale.
196 bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
198 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
199 LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
200 if (subQ.checkInterference())
206 /// getSingleInterference - Return the single interfering virtual register
207 /// assigned to PhysReg. Return 0 if more than one virtual register is
// (continuation of the doc comment appears elided: ...interfering.)
209 LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
211 // Check physreg and aliases.
212 LiveInterval *Interference = 0;
213 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
214 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
215 if (Q.checkInterference()) {
// Ask for at most one interfering vreg; if that didn't enumerate them all,
// there is more than one interference on this alias.
218 Q.collectInterferingVRegs(1);
219 if (!Q.seenAllInterferences())
221 Interference = Q.interferingVRegs().front();
227 // Attempt to reassign this virtual register to a different physical register.
229 // FIXME: we are not yet caching these "second-level" interferences discovered
230 // in the sub-queries. These interferences can change with each call to
231 // selectOrSplit. However, we could implement a "may-interfere" cache that
232 // could be conservatively dirtied when we reassign or split.
234 // FIXME: This may result in a lot of alias queries. We could summarize alias
235 // live intervals in their parent register's live union, but it's messy.
// Returns true and updates VRM when InterferingVReg was moved to a physreg
// that does not alias WantedPhysReg.
236 bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
237 unsigned WantedPhysReg) {
238 assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
// NOTE(review): typo "assigment" in the assert string below — it is a runtime
// string, left untouched here; fix as a separate code change.
239 "Can only reassign virtual registers");
240 assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
241 "inconsistent phys reg assigment");
// Walk the interfering vreg's own allocation order looking for a free home.
243 AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
244 while (unsigned PhysReg = Order.next()) {
245 // Don't reassign to a WantedPhysReg alias.
246 if (TRI->regsOverlap(PhysReg, WantedPhysReg))
// Skip candidates that themselves interfere (uncached: assignments change).
249 if (checkUncachedInterference(InterferingVReg, PhysReg))
252 // Reassign the interfering virtual reg to this physical reg.
253 unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
254 DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
255 TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
256 unassign(InterferingVReg, OldAssign);
257 assign(InterferingVReg, PhysReg);
263 /// tryReassignOrEvict - Try to reassign a single interferences to a different
264 /// physreg, or evict a single interference with a lower spill weight.
265 /// @param VirtReg Currently unassigned virtual register.
266 /// @param Order Physregs to try.
267 /// @return Physreg to assign VirtReg, or 0.
268 unsigned RAGreedy::tryReassignOrEvict(LiveInterval &VirtReg,
269 AllocationOrder &Order,
270 SmallVectorImpl<LiveInterval*> &NewVRegs){
271 NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);
273 // Keep track of the lightest single interference seen so far.
// Seeded with VirtReg's own weight: only strictly lighter vregs are evicted.
274 float BestWeight = VirtReg.weight;
275 LiveInterval *BestVirt = 0;
276 unsigned BestPhys = 0;
279 while (unsigned PhysReg = Order.next()) {
// Only physregs blocked by exactly one vreg are candidates here.
280 LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
281 if (!InterferingVReg)
// Fixed (physreg) interference can be neither reassigned nor evicted.
283 if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
285 if (reassignVReg(*InterferingVReg, PhysReg))
288 // Cannot reassign, is this an eviction candidate?
289 if (InterferingVReg->weight < BestWeight) {
290 BestVirt = InterferingVReg;
292 BestWeight = InterferingVReg->weight;
296 // Nothing reassigned, can we evict a lighter single interference?
298 DEBUG(dbgs() << "evicting lighter " << *BestVirt << '\n');
299 unassign(*BestVirt, VRM->getPhys(BestVirt->reg));
// The evicted vreg goes back on the work queue to be re-allocated later.
300 NewVRegs.push_back(BestVirt);
308 //===----------------------------------------------------------------------===//
310 //===----------------------------------------------------------------------===//
312 /// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
313 /// when considering interference from PhysReg. Also compute an optimistic local
314 /// cost of this interference pattern.
316 /// The final cost of a split is the local cost + global cost of preferences
317 /// broken by SpillPlacement.
// Fills SpillConstraints (one BlockConstraint per live block in SA) and
// returns the accumulated local spill-code cost.
319 float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
320 // Reset interference dependent info.
321 SpillConstraints.resize(SA->LiveBlocks.size());
322 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
323 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
324 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
325 BC.Number = BI.MBB->getNumber();
// Optimistic default: prefer a register wherever the vreg is used live-in/out.
326 BC.Entry = (BI.Uses && BI.LiveIn) ?
327 SpillPlacement::PrefReg : SpillPlacement::DontCare;
328 BC.Exit = (BI.Uses && BI.LiveOut) ?
329 SpillPlacement::PrefReg : SpillPlacement::DontCare;
330 BI.OverlapEntry = BI.OverlapExit = false;
333 // Add interference info from each PhysReg alias.
334 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
335 if (!query(VirtReg, *AI).checkInterference())
337 LiveIntervalUnion::SegmentIter IntI =
338 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
342 // Determine which blocks have interference live in or after the last split
344 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
345 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
346 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
347 SlotIndex Start, Stop;
348 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
350 // Skip interference-free blocks.
351 if (IntI.start() >= Stop)
354 // Is the interference live-in?
356 IntI.advanceTo(Start);
359 if (IntI.start() <= Start)
// Live-in interference forces a spill on entry — no choice.
360 BC.Entry = SpillPlacement::MustSpill;
363 // Is the interference overlapping the last split point?
365 if (IntI.stop() < BI.LastSplitPoint)
366 IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
369 if (IntI.start() < Stop)
370 BC.Exit = SpillPlacement::MustSpill;
374 // Rewind iterator and check other interferences.
375 IntI.find(VirtReg.beginIndex());
376 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
377 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
378 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
379 SlotIndex Start, Stop;
380 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
382 // Skip interference-free blocks.
383 if (IntI.start() >= Stop)
386 // Handle transparent blocks with interference separately.
387 // Transparent blocks never incur any fixed cost.
388 if (BI.LiveThrough && !BI.Uses) {
389 IntI.advanceTo(Start);
392 if (IntI.start() >= Stop)
// Interference inside a transparent block: discourage (but don't force)
// keeping the vreg in a register across it.
395 if (BC.Entry != SpillPlacement::MustSpill)
396 BC.Entry = SpillPlacement::PrefSpill;
397 if (BC.Exit != SpillPlacement::MustSpill)
398 BC.Exit = SpillPlacement::PrefSpill;
402 // Now we only have blocks with uses left.
403 // Check if the interference overlaps the uses.
404 assert(BI.Uses && "Non-transparent block without any uses");
406 // Check interference on entry.
407 if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
408 IntI.advanceTo(Start);
411 // Not live in, but before the first use.
412 if (IntI.start() < BI.FirstUse)
413 BC.Entry = SpillPlacement::PrefSpill;
416 // Does interference overlap the uses in the entry segment
418 if (BI.LiveIn && !BI.OverlapEntry) {
419 IntI.advanceTo(BI.FirstUse);
422 // A live-through interval has no kill.
423 // Check [FirstUse;LastUse) instead.
424 if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
425 BI.OverlapEntry = true;
428 // Does interference overlap the uses in the exit segment [Def;LastUse)?
429 if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
430 IntI.advanceTo(BI.Def);
433 if (IntI.start() < BI.LastUse)
434 BI.OverlapExit = true;
437 // Check interference on exit.
438 if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
439 // Check interference between LastUse and Stop.
440 if (BC.Exit != SpillPlacement::PrefSpill) {
441 IntI.advanceTo(BI.LastUse);
444 if (IntI.start() < Stop)
445 BC.Exit = SpillPlacement::PrefSpill;
451 // Accumulate a local cost of this interference pattern.
453 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
454 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
457 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
458 unsigned Inserts = 0;
460 // Do we need spill code for the entry segment?
462 Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;
464 // For the exit segment?
466 Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;
468 // The local cost of spill code in this block is the block frequency times
469 // the number of spill instructions inserted.
471 LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
473 DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
474 << LocalCost << '\n');
478 /// calcGlobalSplitCost - Return the global split cost of following the split
479 /// pattern in LiveBundles. This cost should be added to the local cost of the
480 /// interference pattern in SpillConstraints.
482 float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
483 float GlobalCost = 0;
484 for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
485 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
// Count one insert per block edge whose placement disagrees with the
// block's register preference.
486 unsigned Inserts = 0;
487 // Broken entry preference?
488 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
489 (BC.Entry == SpillPlacement::PrefReg);
490 // Broken exit preference?
491 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
492 (BC.Exit == SpillPlacement::PrefReg);
// Inserts are weighted by block frequency (accumulation line elided in
// this listing; presumably GlobalCost += ... — TODO confirm).
495 Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
497 DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
501 /// splitAroundRegion - Split VirtReg around the region determined by
502 /// LiveBundles. Make an effort to avoid interference from PhysReg.
504 /// The 'register' interval is going to contain as many uses as possible while
505 /// avoiding interference. The 'stack' interval is the complement constructed by
506 /// SplitEditor. It will contain the rest.
508 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
509 const BitVector &LiveBundles,
510 SmallVectorImpl<LiveInterval*> &NewVRegs) {
512 dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
514 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
515 dbgs() << " EB#" << i;
519 // First compute interference ranges in the live blocks.
// InterferenceRanges[i] = [first, last) interference slots in live block i;
// invalid SlotIndexes mean "no interference on that side".
520 typedef std::pair<SlotIndex, SlotIndex> IndexPair;
521 SmallVector<IndexPair, 8> InterferenceRanges;
522 InterferenceRanges.resize(SA->LiveBlocks.size());
523 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
524 if (!query(VirtReg, *AI).checkInterference())
526 LiveIntervalUnion::SegmentIter IntI =
527 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
530 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
531 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
532 IndexPair &IP = InterferenceRanges[i];
533 SlotIndex Start, Stop;
534 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
535 // Skip interference-free blocks.
536 if (IntI.start() >= Stop)
539 // First interference in block.
541 IntI.advanceTo(Start);
544 if (IntI.start() >= Stop)
546 if (!IP.first.isValid() || IntI.start() < IP.first)
547 IP.first = IntI.start();
550 // Last interference in block.
552 IntI.advanceTo(Stop);
553 if (!IntI.valid() || IntI.start() >= Stop)
555 if (IntI.stop() <= Start)
557 if (!IP.second.isValid() || IntI.stop() > IP.second)
558 IP.second = IntI.stop();
// Set up the live range editor; SpillRegs stays empty — we split, not spill.
563 SmallVector<LiveInterval*, 4> SpillRegs;
564 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
565 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
567 // Create the main cross-block interval.
570 // First add all defs that are live out of a block.
571 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
572 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
// RegIn/RegOut: does the bundle placement keep the vreg in a register
// across this block's entry/exit edge?
573 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
574 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
576 // Should the register be live out?
577 if (!BI.LiveOut || !RegOut)
580 IndexPair &IP = InterferenceRanges[i];
581 SlotIndex Start, Stop;
582 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
584 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
585 << Bundles->getBundle(BI.MBB->getNumber(), 1)
586 << " intf [" << IP.first << ';' << IP.second << ')');
588 // The interference interval should either be invalid or overlap MBB.
589 assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
590 assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");
592 // Check interference leaving the block.
593 if (!IP.second.isValid()) {
594 // Block is interference-free.
595 DEBUG(dbgs() << ", no interference");
597 assert(BI.LiveThrough && "No uses, but not live through block?");
598 // Block is live-through without interference.
599 DEBUG(dbgs() << ", no uses"
600 << (RegIn ? ", live-through.\n" : ", stack in.\n"));
602 SE.enterIntvAtEnd(*BI.MBB);
605 if (!BI.LiveThrough) {
606 DEBUG(dbgs() << ", not live-through.\n");
607 SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
611 // Block is live-through, but entry bundle is on the stack.
612 // Reload just before the first use.
613 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
614 SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
617 DEBUG(dbgs() << ", live-through.\n");
621 // Block has interference.
622 DEBUG(dbgs() << ", interference to " << IP.second);
624 if (!BI.LiveThrough && IP.second <= BI.Def) {
625 // The interference doesn't reach the outgoing segment.
626 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
627 SE.useIntv(BI.Def, Stop);
633 // No uses in block, avoid interference by reloading as late as possible.
634 DEBUG(dbgs() << ", no uses.\n");
635 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
636 assert(SegStart >= IP.second && "Couldn't avoid interference");
640 if (IP.second.getBoundaryIndex() < BI.LastUse) {
641 // There are interference-free uses at the end of the block.
642 // Find the first use that can get the live-out register.
643 SmallVectorImpl<SlotIndex>::const_iterator UI =
644 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
645 IP.second.getBoundaryIndex());
646 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
648 assert(Use <= BI.LastUse && "Couldn't find last use");
649 // Only attempt a split before the last split point.
650 if (Use.getBaseIndex() <= BI.LastSplitPoint) {
651 DEBUG(dbgs() << ", free use at " << Use << ".\n");
652 SlotIndex SegStart = SE.enterIntvBefore(Use);
653 assert(SegStart >= IP.second && "Couldn't avoid interference");
654 assert(SegStart < BI.LastSplitPoint && "Impossible split point");
655 SE.useIntv(SegStart, Stop);
660 // Interference is after the last use.
661 DEBUG(dbgs() << " after last use.\n");
662 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
663 assert(SegStart >= IP.second && "Couldn't avoid interference");
666 // Now all defs leading to live bundles are handled, do everything else.
667 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
668 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
669 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
670 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
672 // Is the register live-in?
673 if (!BI.LiveIn || !RegIn)
676 // We have an incoming register. Check for interference.
677 IndexPair &IP = InterferenceRanges[i];
678 SlotIndex Start, Stop;
679 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
681 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
682 << " -> BB#" << BI.MBB->getNumber());
684 // Check interference entering the block.
685 if (!IP.first.isValid()) {
686 // Block is interference-free.
687 DEBUG(dbgs() << ", no interference");
689 assert(BI.LiveThrough && "No uses, but not live through block?");
690 // Block is live-through without interference.
692 DEBUG(dbgs() << ", no uses, live-through.\n");
693 SE.useIntv(Start, Stop);
695 DEBUG(dbgs() << ", no uses, stack-out.\n");
696 SE.leaveIntvAtTop(*BI.MBB);
700 if (!BI.LiveThrough) {
701 DEBUG(dbgs() << ", killed in block.\n");
702 SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
706 // Block is live-through, but exit bundle is on the stack.
707 // Spill immediately after the last use.
708 if (BI.LastUse < BI.LastSplitPoint) {
709 DEBUG(dbgs() << ", uses, stack-out.\n");
710 SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
713 // The last use is after the last split point, it is probably an
715 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
716 << BI.LastSplitPoint << ", stack-out.\n");
717 SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
718 SE.useIntv(Start, SegEnd);
719 // Run a double interval from the split to the last use.
720 // This makes it possible to spill the complement without affecting the
722 SE.overlapIntv(SegEnd, BI.LastUse);
725 // Register is live-through.
726 DEBUG(dbgs() << ", uses, live-through.\n");
727 SE.useIntv(Start, Stop);
731 // Block has interference.
732 DEBUG(dbgs() << ", interference from " << IP.first);
734 if (!BI.LiveThrough && IP.first >= BI.Kill) {
735 // The interference doesn't reach the outgoing segment.
736 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
737 SE.useIntv(Start, BI.Kill);
742 // No uses in block, avoid interference by spilling as soon as possible.
743 DEBUG(dbgs() << ", no uses.\n");
744 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
745 assert(SegEnd <= IP.first && "Couldn't avoid interference");
748 if (IP.first.getBaseIndex() > BI.FirstUse) {
749 // There are interference-free uses at the beginning of the block.
750 // Find the last use that can get the register.
751 SmallVectorImpl<SlotIndex>::const_iterator UI =
752 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
753 IP.first.getBaseIndex());
754 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
755 SlotIndex Use = (--UI)->getBoundaryIndex();
756 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
757 SlotIndex SegEnd = SE.leaveIntvAfter(Use);
758 assert(SegEnd <= IP.first && "Couldn't avoid interference");
759 SE.useIntv(Start, SegEnd);
763 // Interference is before the first use.
764 DEBUG(dbgs() << " before first use.\n");
765 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
766 assert(SegEnd <= IP.first && "Couldn't avoid interference");
771 // FIXME: Should we be more aggressive about splitting the stack region into
772 // per-block segments? The current approach allows the stack region to
773 // separate into connected components. Some components may be allocatable.
777 MF->verify(this, "After splitting live range around region");
780 // Make sure that at least one of the new intervals can allocate to PhysReg.
781 // That was the whole point of splitting the live range.
783 for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
785 if (!checkUncachedInterference(**I, PhysReg)) {
789 assert(found && "No allocatable intervals after pointless splitting");
// Try a region split of VirtReg: for each candidate physreg, compute the
// local interference cost, run SpillPlacement over the edge bundles, add the
// global cost, and keep the cheapest candidate. On success, performs the
// split via splitAroundRegion (new vregs land in NewVRegs).
794 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
795 SmallVectorImpl<LiveInterval*> &NewVRegs) {
796 BitVector LiveBundles, BestBundles;
798 unsigned BestReg = 0;
800 while (unsigned PhysReg = Order.next()) {
801 float Cost = calcInterferenceInfo(VirtReg, PhysReg);
// Early reject: local cost alone already beats the best total cost.
802 if (BestReg && Cost >= BestCost)
805 SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
806 // No live bundles, defer to splitSingleBlocks().
807 if (!LiveBundles.any())
810 Cost += calcGlobalSplitCost(LiveBundles);
811 if (!BestReg || Cost < BestCost) {
814 BestBundles.swap(LiveBundles);
821 splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
826 //===----------------------------------------------------------------------===//
827 // Live Range Splitting
828 //===----------------------------------------------------------------------===//
830 /// trySplit - Try to split VirtReg or one of its interferences, making it
832 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
833 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
834 SmallVectorImpl<LiveInterval*>&NewVRegs) {
835 NamedRegionTimer T("Splitter", TimerGroupName, TimePassesIsEnabled);
// Gather block-level liveness/use info for VirtReg before splitting.
836 SA->analyze(&VirtReg);
838 // Don't attempt splitting on local intervals for now. TBD.
839 if (LIS->intervalIsInOneMBB(VirtReg))
842 // First try to split around a region spanning multiple blocks.
843 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
844 if (PhysReg || !NewVRegs.empty())
847 // Then isolate blocks with multiple uses.
848 SplitAnalysis::BlockPtrSet Blocks;
849 if (SA->getMultiUseBlocks(Blocks)) {
850 SmallVector<LiveInterval*, 4> SpillRegs;
851 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
852 SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
854 MF->verify(this, "After splitting live range around basic blocks");
857 // Don't assign any physregs.
862 //===----------------------------------------------------------------------===//
864 //===----------------------------------------------------------------------===//
866 /// calcInterferenceWeight - Calculate the combined spill weight of
867 /// interferences when assigning VirtReg to PhysReg.
// Sums the weights of all vregs interfering on PhysReg and its aliases;
// an unspillable interference poisons the result (return path elided here).
868 float RAGreedy::calcInterferenceWeight(LiveInterval &VirtReg, unsigned PhysReg){
870 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
871 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
872 Q.collectInterferingVRegs();
873 if (Q.seenUnspillableVReg())
875 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i)
876 Sum += Q.interferingVRegs()[i]->weight;
881 /// trySpillInterferences - Try to spill interfering registers instead of the
882 /// current one. Only do it if the accumulated spill weight is smaller than the
883 /// current spill weight.
884 unsigned RAGreedy::trySpillInterferences(LiveInterval &VirtReg,
885 AllocationOrder &Order,
886 SmallVectorImpl<LiveInterval*> &NewVRegs) {
887 NamedRegionTimer T("Spill Interference", TimerGroupName, TimePassesIsEnabled);
888 unsigned BestPhys = 0;
889 float BestWeight = 0;
// Find the candidate physreg whose interferences are cheapest to spill.
892 while (unsigned PhysReg = Order.next()) {
893 float Weight = calcInterferenceWeight(VirtReg, PhysReg);
// Reject unspillable (HUGE_VALF) and not-cheaper-than-VirtReg candidates.
894 if (Weight == HUGE_VALF || Weight >= VirtReg.weight)
896 if (!BestPhys || Weight < BestWeight)
897 BestPhys = PhysReg, BestWeight = Weight;
900 // No candidates found.
904 // Collect all interfering registers.
905 SmallVector<LiveInterval*, 8> Spills;
906 for (const unsigned *AI = TRI->getOverlaps(BestPhys); *AI; ++AI) {
907 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
908 Spills.append(Q.interferingVRegs().begin(), Q.interferingVRegs().end());
// Unassign each interference before spilling it.
909 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
910 LiveInterval *VReg = Q.interferingVRegs()[i];
911 unassign(*VReg, *AI);
916 DEBUG(dbgs() << "spilling " << Spills.size() << " interferences with weight "
917 << BestWeight << '\n');
918 for (unsigned i = 0, e = Spills.size(); i != e; ++i)
919 spiller().spill(Spills[i], NewVRegs, Spills);
924 //===----------------------------------------------------------------------===//
926 //===----------------------------------------------------------------------===//
// Main per-interval driver: strategies are tried in order of increasing
// cost — free register, reassign/evict, split, spill interferences, and
// finally spill VirtReg itself (returning 0 defers allocation this round).
928 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
929 SmallVectorImpl<LiveInterval*> &NewVRegs) {
930 // First try assigning a free register.
931 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
932 while (unsigned PhysReg = Order.next()) {
933 if (!checkPhysRegInterference(VirtReg, PhysReg))
937 // Try to reassign interferences.
938 if (unsigned PhysReg = tryReassignOrEvict(VirtReg, Order, NewVRegs))
941 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
943 // Try splitting VirtReg or interferences.
944 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
945 if (PhysReg || !NewVRegs.empty())
948 // Try to spill another interfering reg with less spill weight.
949 PhysReg = trySpillInterferences(VirtReg, Order, NewVRegs);
953 // Finally spill VirtReg itself.
954 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
955 SmallVector<LiveInterval*, 1> pendingSpills;
956 spiller().spill(&VirtReg, NewVRegs, pendingSpills);
958 // The live virtual register requesting allocation was spilled, so tell
959 // the caller not to allocate anything during this round.
963 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
964 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
965 << "********** Function: "
966 << ((Value*)mf.getFunction())->getName() << '\n');
970 MF->verify(this, "Before greedy register allocator");
972 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
973 Indexes = &getAnalysis<SlotIndexes>();
974 DomTree = &getAnalysis<MachineDominatorTree>();
975 ReservedRegs = TRI->getReservedRegs(*MF);
976 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
977 Loops = &getAnalysis<MachineLoopInfo>();
978 LoopRanges = &getAnalysis<MachineLoopRanges>();
979 Bundles = &getAnalysis<EdgeBundles>();
980 SpillPlacer = &getAnalysis<SpillPlacement>();
982 SA.reset(new SplitAnalysis(*MF, *LIS, *Loops));
990 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
991 std::auto_ptr<VirtRegRewriter> rewriter(createVirtRegRewriter());
992 rewriter->runOnMachineFunction(*MF, *VRM, LIS);
995 // The pass output is in VirtRegMap. Release all the transient data.