//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");
static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));
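
// For example, passing -split-spill-mode=size to llc selects
// SplitEditor::SM_Size for all split operations that honor this option,
// trading split quality for smaller code.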
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);
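
// This registers the allocator under the name "greedy", so it can be selected
// explicitly on the llc command line with -regalloc=greedy.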
namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;
  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];
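
  // A typical progression: a range enters as RS_New, is promoted to RS_Assign
  // when first queued, waits a round as RS_Split if assignment and eviction
  // fail, and its split products re-enter as RS_New or RS_Split2 until they
  // are either assigned or spilled and marked RS_Done.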
  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }
  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
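
  // The comparison above is lexicographic: a cost of {BrokenHints: 0,
  // MaxWeight: 5.0} is cheaper than {BrokenHints: 1, MaxWeight: 0.1}, so
  // avoiding broken hints always outweighs evicting lighter spill weights.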
  // Register mask interference. The current VirtReg is checked for register
  // mask interference on entry to selectOrSplit(). If there is no
  // interference, UsableRegs is left empty. If there is interference,
  // UsableRegs has a bit mask of registers that can be used without register
  // mask interference.
  BitVector UsableRegs;

  /// clobberedByRegMask - Returns true if PhysReg is not directly usable
  /// because of register mask clobbers.
  bool clobberedByRegMask(unsigned PhysReg) const {
    return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
  }

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;
  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;
public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace
char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
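
// For example, with Hysteresis = 0.98 a new region split candidate must be
// more than 2% cheaper than the recorded best before it replaces it, so tiny
// floating-point rounding differences cannot flip the decision back and forth.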
FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned:
  // reset the parent to RS_Assign and give the clone the same stage.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}
void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}
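
// Example of the resulting priority order: a first-round range of size 100
// gets Prio = (1u << 31) + 100, and a physreg hint additionally sets bit 30;
// a deferred RS_Split range only gets Prio = 100, so the max-priority queue
// hands out all first-round work before any deferred splitting.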
//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  }
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}
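
// Note that getCostPerUse is nonzero for registers that are more expensive to
// encode, e.g. x86-64 registers that require a REX prefix, which is why a
// cheap eviction can be preferable to a free but costly register.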
//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}
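
// In effect: a hinted assignment may evict a splittable range even when the
// evictee is heavier, while in all other cases the plain weight comparison
// lets the more expensive-to-spill range keep the register.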
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}
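
// Cascade example: if %a (cascade 3) evicted %b, %b also carries cascade 3,
// and the check above (Cascade <= IntfCascade) then stops a requeued %b from
// evicting %a right back, so the eviction chain can only move forward.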
/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}
/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}
//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
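
// For example, a block whose value is live-in with interference starting
// before the first use gets BC.Entry = PrefSpill and one reload counted into
// the static cost; interference reaching all the way to the block entry
// upgrades that to MustSpill.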
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}
/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}
/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}
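
// For example, a range used in three blocks with frequencies 1, 4, and 16
// costs 1 + 4 + 16 = 21 to split per-block, plus another 16 if the value is
// also redefined in the hottest block (live-in, live-out, with FirstDef set).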
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
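
// For example, a live-through block that keeps the value in a register both
// on entry and exit only costs 2 * frequency when the block has interference
// (spill around the clobber); a register/stack mismatch on a single edge
// costs one transfer at the block's frequency.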
/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}
//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}
//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}
//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}
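
// Worked example: with uses at slot indexes 4, 8, and 12, an interfering
// segment covering [6;10) with weight w raises both GapWeight[0] (the 4-8
// gap) and GapWeight[1] (the 8-12 gap) to at least w, because it overlaps
// the use at 8 and therefore counts in the gaps on both sides of it.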
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (!UsableRegs.empty()) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
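  //
  // Concretely: a 3-instruction RS_New range may be split into 2 + 3
  // instruction pieces (rule 1); the piece that did not shrink is tagged
  // RS_Split2 (rule 3), so any further split of it must strictly reduce the
  // instruction count (rule 2).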
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (clobberedByRegMask(PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}
//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}
//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Check if VirtReg is live across any calls.
  UsableRegs.clear();
  if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
    DEBUG(dbgs() << "Live across regmasks.\n");

  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
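
// To summarize the per-range strategy implemented above: try a free register,
// then eviction, then defer one round, then region/block/local/instruction
// splitting, and finally spill; each stage transition is recorded in
// ExtraRegInfo so a range never repeats a stage.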
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  LIS->addKillFlags();

  // Run rewriter.
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  {
    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
    DebugVars->emitDebugValues(VRM);
  }

  // All machine operands and other references to virtual registers have been
  // replaced. Remove the virtual registers and release all the transient data.
  VRM->clearAllVirt();
  MRI->clearVirtRegs();
  releaseMemory();

  return true;
}