//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

#include <queue>

using namespace llvm;
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");
static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));
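
// Editor's note: being a cl::opt, the spill mode is selectable on the llc
// command line next to the allocator itself, e.g.
//   llc -regalloc=greedy -split-spill-mode=size foo.ll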
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;
  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };
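
  // Editor's sketch: a hard-to-allocate range normally advances through
  //
  //   RS_New -> RS_Assign -> RS_Split -> RS_Split2 -> RS_Spill -> RS_Done
  //
  // and setStage() records where each split product re-enters the queue, so
  // earlier stages are never repeated for it.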
  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }
  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
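
  // Editor's example: EvictionCost::operator< compares lexicographically, so
  // one extra broken hint outweighs any spill-weight difference. Given the
  // hypothetical costs A{BrokenHints=0, MaxWeight=9.0} and
  // B{BrokenHints=1, MaxWeight=0.1}, A < B holds even though A evicts the
  // heavier live range.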
  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps.
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;
  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;
public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;
private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
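
// Editor's example: the constant is folded into the winning side of a
// comparison, e.g. BestCost = Hysteresis * Cost in tryRegionSplit(), so a
// later candidate must beat the incumbent by more than about 2% before the
// allocator changes its mind.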
FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}
void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // The clone inherits the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}
void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}
void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  Queue.push(std::make_pair(Prio, ~Reg));
}
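
// Editor's sketch of the key encoding above: std::priority_queue is a
// max-heap over (Prio, ~Reg) pairs, so ties on Prio surface the *lowest*
// virtual register number first.
//
//   Queue.push(std::make_pair(Prio, ~Reg));   // encode in enqueue()
//   unsigned Reg = ~Queue.top().second;       // decode in dequeue()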
LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}
//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//
/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}
//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//
/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
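  //
  // Editor's example: if %A carries cascade 3 and evicts %B, then %B is
  // stamped with cascade 3 too (see evictInterference), and from then on %B
  // can only be displaced by a range with a strictly newer cascade number.
  // A range with cascade 0 (never assigned) can evict and be evicted freely.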
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}
/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf);
  }
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}
//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//
/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
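
// Editor's worked example of the mapping above: for a live-in value,
// interference reaching the block entry forces MustSpill; interference
// strictly before the first use downgrades the entry to PrefSpill; and
// interference between the uses only counts extra spill code (++Ins)
// without changing the border preference. The live-out side mirrors this,
// using the block's last split point instead of its start index.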
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}
/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i >= 0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}
/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }
  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }
  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i >= 0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}
//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//
/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges, that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}
//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//
/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop  = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}
//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//
/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveInterval &LI = LIS->getRegUnit(*Units);
    LiveInterval::const_iterator I = LI.find(StartIdx);
    LiveInterval::const_iterator E = LI.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = HUGE_VALF;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
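  //
  // Editor's example of the rules in action: a 3-instruction range at stage
  // RS_Split may be split into a 2-instruction and a 3-instruction piece
  // (rule 1); the equal-sized piece is tagged RS_Split2 (rule 3), so if it
  // comes back it must shrink strictly (rule 2) or no split is performed.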
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
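        // Editor's note on the estimate above: blockFreq * (NewGaps + 1)
        // models one read or write per instruction remaining in the new
        // range, and the second argument is the projected size of that range
        // in slot-index units, so EstWeight should be comparable with the
        // normalized interference weights accumulated in GapWeight.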
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}
//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//
/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}
//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
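
// Editor's summary sketch of the decision cascade implemented above:
//
//   tryAssign -> tryEvict -> (first round? requeue as RS_Split)
//             -> trySplit -> spill (RS_Done)
//
// Each step either returns a physreg, pushes new vregs onto NewVRegs for
// requeueing, or falls through to the next, more drastic step.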
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.