//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// the optimized build.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);
namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  OwningPtr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;
  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

#ifndef NDEBUG
  static const char *const StageName[];
#endif
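
  // For example: a fresh virtual register enters the queue as RS_New and is
  // bumped to RS_Assign when first enqueued. If neither assignment nor
  // eviction works, selectOrSplit() requeues it as RS_Split. Splitting then
  // produces new ranges: products that fail to shrink are demoted to
  // RS_Split2, remainder intervals go to RS_Spill, and spilled ranges end at
  // RS_Done.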
  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }
  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
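
  // Ordering example: a cost of {BrokenHints = 0, MaxWeight = 5.0} compares
  // less than (i.e. is cheaper than) {BrokenHints = 1, MaxWeight = 0.1};
  // broken hints dominate, and the evicted spill weight only breaks ties.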
  // splitting state.
  OwningPtr<SplitAnalysis> SA;
  OwningPtr<SplitEditor> SE;

  /// Cached per-block interference maps.
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };
  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum LLVM_ENUM_INT_TYPE(unsigned) { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;
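
  // Hypothetical example: after region splitting, BundleCand might map edge
  // bundles 0, 3, and 7 to GlobalCand entry 2 and every other bundle to
  // NoCand, meaning the value lives in GlobalCand[2].PhysReg across those
  // bundles and on the stack everywhere else.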

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<unsigned>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
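
// For example, tryLocalSplit() accepts a split candidate only when
// EstWeight * Hysteresis >= MaxGap, i.e. the estimate must win by roughly
// 2%, so near-equal floating-point costs don't cause oscillating decisions.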

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // Reset the parent to RS_Assign, then give the clone the same stage as the
  // parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  Queue.push(std::make_pair(Prio, ~Reg));
}
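
// Worked example of the priority encoding (sizes are made up): a global
// range of size 100 is queued with Prio = (1u << 31) | ((1u << 29) + 100);
// a local RS_Assign range is ordered by its instruction distance from the
// end of the function instead; a deferred RS_Split range gets only
// Prio = Size, so it sorts below everything carrying the 1u << 31 bit.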

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}
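
// For example, on a target where most registers have getCostPerUse() == 0
// but a few cost 1, tryAssign() takes a free zero-cost register directly;
// only when the first free register carries a non-zero per-use cost does it
// call tryEvict() with that cost as the limit, hoping to free a cheaper one.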


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
                 << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          !canReassign(*Intf, PhysReg)) {
        return false;
      }
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}
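
// Cascade numbers in practice (hypothetical vregs): if %vreg1 with Cascade 3
// evicts %vreg2, then %vreg2 inherits Cascade 3 and can afterwards only be
// evicted by a live range with a newer (larger) cascade number. A range that
// has never evicted anything has Cascade 0 and can evict or be evicted
// freely.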

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.nextWithDups(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}
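
// Example of the constraints computed above: a live-in value that is already
// interfered with at the block entry yields Entry = MustSpill; interference
// strictly between the entry and the first use yields Entry = PrefSpill; and
// interference between the first and last use leaves Entry = PrefReg while
// still counting a spill instruction. Each counted instruction adds the
// block frequency to the static cost once.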

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}
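
// For example, in calcSpillCost() above, a block that is live-in, redefines
// the value, and is live-out pays its block frequency twice: once for
// reloading the old value and once for storing the redefined one.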

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  BlockFrequency BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop  = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = llvm::huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / BlockFrequency::getEntryFrequency());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}


//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned>&NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
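
// Overall strategy for a single virtual register, as implemented below:
// (1) take a free register if one exists, (2) evict a less worthy live
// range, (3) on the first failure just requeue and wait for the second
// round, (4) split the range around regions, blocks, instructions, or local
// gaps, and (5) spill as the last resort.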

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}