//===-- RegAllocLinearScan.cpp - Linear Scan register allocator ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a linear scan register allocator.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "VirtRegMap.h"
#include "VirtRegRewriter.h"
#include "Spiller.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <queue>

using namespace llvm;
STATISTIC(NumIters, "Number of iterations performed");
STATISTIC(NumBacktracks, "Number of times we had to backtrack");
STATISTIC(NumCoalesce, "Number of copies coalesced");
STATISTIC(NumDowngrade, "Number of registers downgraded");
static cl::opt<bool>
NewHeuristic("new-spilling-heuristic",
             cl::desc("Use new spilling heuristic"),
             cl::init(false), cl::Hidden);

static cl::opt<bool>
PreSplitIntervals("pre-alloc-split",
                  cl::desc("Pre-register allocation live interval splitting"),
                  cl::init(false), cl::Hidden);

static cl::opt<bool>
TrivCoalesceEnds("trivial-coalesce-ends",
                 cl::desc("Attempt trivial coalescing of interval ends"),
                 cl::init(false), cl::Hidden);

static RegisterRegAlloc
linearscanRegAlloc("linearscan", "linear scan register allocator",
                   createLinearScanRegisterAllocator);
// When we allocate a register, add it to a fixed-size queue of
// registers to skip in subsequent allocations. This trades a small
// amount of register pressure and increased spills for flexibility in
// the post-pass scheduler.
//
// Note that the number of registers used for reloading spills
// will be one greater than the value of this option.
//
// One big limitation of this is that it doesn't differentiate between
// different register classes. So on x86-64, if there is xmm register
// pressure, it can cause fewer GPRs to be held in the queue.
static cl::opt<unsigned>
NumRecentlyUsedRegs("linearscan-skip-count",
                    cl::desc("Number of registers for linearscan to remember"
                             " to skip."),
                    cl::init(0), cl::Hidden);
struct RALinScan : public MachineFunctionPass {
  static char ID;
  RALinScan() : MachineFunctionPass(&ID) {
    // Initialize the queue to record recently-used registers.
    if (NumRecentlyUsedRegs > 0)
      RecentRegs.resize(NumRecentlyUsedRegs, 0);
    RecentNext = RecentRegs.begin();
  }
  typedef std::pair<LiveInterval*, LiveInterval::iterator> IntervalPtr;
  typedef SmallVector<IntervalPtr, 32> IntervalPtrs;
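  // An IntervalPtr pairs a live interval with a cached iterator into its
  // ranges, so repeated scans over the interval can resume where the previous
  // scan stopped instead of starting from the beginning.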
  /// RelatedRegClasses - This structure is built the first time a function is
  /// compiled, and keeps track of which register classes have registers that
  /// belong to multiple classes or have aliases that are in other classes.
  EquivalenceClasses<const TargetRegisterClass*> RelatedRegClasses;
  DenseMap<unsigned, const TargetRegisterClass*> OneClassForEachPhysReg;
  // NextReloadMap - For each register in the map, it maps to another
  // register which is defined by a reload from the same stack slot and
  // both reloads are in the same basic block.
  DenseMap<unsigned, unsigned> NextReloadMap;

  // DowngradedRegs - A set of registers which are being "downgraded", i.e.
  // un-favored for allocation.
  SmallSet<unsigned, 8> DowngradedRegs;

  // DowngradeMap - A map from virtual registers to physical registers being
  // downgraded for the virtual registers.
  DenseMap<unsigned, unsigned> DowngradeMap;
  MachineFunction* mf_;
  MachineRegisterInfo* mri_;
  const TargetMachine* tm_;
  const TargetRegisterInfo* tri_;
  const TargetInstrInfo* tii_;
  BitVector allocatableRegs_;
  LiveIntervals* li_;
  LiveStacks* ls_;
  const MachineLoopInfo *loopInfo;
  /// handled_ - Intervals are added to the handled_ set in the order of their
  /// start value. This is used for backtracking.
  std::vector<LiveInterval*> handled_;

  /// fixed_ - Intervals that correspond to machine registers.
  IntervalPtrs fixed_;

  /// active_ - Intervals that are currently being processed, and which have a
  /// live range active for the current point.
  IntervalPtrs active_;

  /// inactive_ - Intervals that are currently being processed, but which have
  /// a hole at the current point.
  IntervalPtrs inactive_;
  typedef std::priority_queue<LiveInterval*,
                              SmallVector<LiveInterval*, 64>,
                              greater_ptr<LiveInterval> > IntervalHeap;
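  /// unhandled_ - Intervals that still need a register, ordered so that the
  /// interval with the earliest start point is always on top of the heap.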
  IntervalHeap unhandled_;

  /// regUse_ - Tracks register usage.
  SmallVector<unsigned, 32> regUse_;
  SmallVector<unsigned, 32> regUseBackUp_;
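  // regUse_[PhysReg] counts how many live intervals currently occupy PhysReg
  // (aliases are bumped as well); regUseBackUp_ holds a snapshot taken by
  // backUpRegUses() so restoreRegUses() can undo speculative changes.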
  /// vrm_ - Tracks register assignments.
  VirtRegMap* vrm_;

  std::auto_ptr<VirtRegRewriter> rewriter_;

  std::auto_ptr<Spiller> spiller_;

  // The queue of recently-used registers.
  SmallVector<unsigned, 4> RecentRegs;
  SmallVector<unsigned, 4>::iterator RecentNext;
  // Record that we just picked this register.
  void recordRecentlyUsed(unsigned reg) {
    assert(reg != 0 && "Recently used register is NOREG!");
    if (!RecentRegs.empty()) {
      *RecentNext++ = reg;
      if (RecentNext == RecentRegs.end())
        RecentNext = RecentRegs.begin();
    }
  }
  virtual const char* getPassName() const {
    return "Linear Scan Register Allocator";
  }
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    AU.addRequired<LiveIntervals>();
    AU.addPreserved<SlotIndexes>();
    AU.addRequiredID(StrongPHIEliminationID);
    // Make sure PassManager knows which analyses to make available
    // to coalescing and which analyses coalescing invalidates.
    AU.addRequiredTransitive<RegisterCoalescer>();
    AU.addRequired<CalculateSpillWeights>();
    if (PreSplitIntervals)
      AU.addRequiredID(PreAllocSplittingID);
    AU.addRequired<LiveStacks>();
    AU.addPreserved<LiveStacks>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    AU.addRequired<VirtRegMap>();
    AU.addPreserved<VirtRegMap>();
    AU.addPreservedID(MachineDominatorsID);
    MachineFunctionPass::getAnalysisUsage(AU);
  }
  /// runOnMachineFunction - register allocate the whole function
  bool runOnMachineFunction(MachineFunction&);

  // Determine if we should skip this register because it was recently used.
  bool isRecentlyUsed(unsigned reg) const {
    return std::find(RecentRegs.begin(), RecentRegs.end(), reg) !=
           RecentRegs.end();
  }

  /// linearScan - the linear scan algorithm
  void linearScan();

  /// initIntervalSets - initialize the interval sets.
  void initIntervalSets();
  /// processActiveIntervals - expire old intervals and move non-overlapping
  /// ones to the inactive list.
  void processActiveIntervals(SlotIndex CurPoint);

  /// processInactiveIntervals - expire old intervals and move overlapping
  /// ones to the active list.
  void processInactiveIntervals(SlotIndex CurPoint);

  /// hasNextReloadInterval - Return the next live interval that's being
  /// defined by a reload from the same stack slot as the specified one.
  LiveInterval *hasNextReloadInterval(LiveInterval *cur);

  /// DowngradeRegister - Downgrade a register for allocation.
  void DowngradeRegister(LiveInterval *li, unsigned Reg);

  /// UpgradeRegister - Upgrade a register for allocation.
  void UpgradeRegister(unsigned Reg);

  /// assignRegOrStackSlotAtInterval - assign a register if one
  /// is available, or spill.
  void assignRegOrStackSlotAtInterval(LiveInterval* cur);

  void updateSpillWeights(std::vector<float> &Weights,
                          unsigned reg, float weight,
                          const TargetRegisterClass *RC);

  /// findIntervalsToSpill - Determine the intervals to spill for the
  /// specified interval. It's passed the physical registers whose spill
  /// weight is the lowest among all the registers whose live intervals
  /// conflict with the interval.
  void findIntervalsToSpill(LiveInterval *cur,
                            std::vector<std::pair<unsigned,float> > &Candidates,
                            unsigned NumCands,
                            SmallVector<LiveInterval*, 8> &SpillIntervals);

  /// attemptTrivialCoalescing - If a simple interval is defined by a copy,
  /// try to allocate the definition the same register as the source register
  /// if the register is not defined during the live time of the interval.
  /// This eliminates a copy. This is used to coalesce copies which were not
  /// coalesced away before allocation either due to dest and src being in
  /// different register classes or because the coalescer was overly
  /// conservative.
  unsigned attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg);
  /// Register usage / availability tracking helpers.

  void initRegUses() {
    regUse_.resize(tri_->getNumRegs(), 0);
    regUseBackUp_.resize(tri_->getNumRegs(), 0);
  }

  void finalizeRegUses() {
    // Verify all the registers are "freed".
    for (unsigned i = 0, e = tri_->getNumRegs(); i != e; ++i) {
      if (regUse_[i] != 0) {
        dbgs() << tri_->getName(i) << " is still in use!\n";
      }
    }
    regUse_.clear();
    regUseBackUp_.clear();
  }
  void addRegUse(unsigned physReg) {
    assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
           "should be physical register!");
    ++regUse_[physReg];
    for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as)
      ++regUse_[*as];
  }

  void delRegUse(unsigned physReg) {
    assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
           "should be physical register!");
    assert(regUse_[physReg] != 0);
    --regUse_[physReg];
    for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as) {
      assert(regUse_[*as] != 0);
      --regUse_[*as];
    }
  }
  bool isRegAvail(unsigned physReg) const {
    assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
           "should be physical register!");
    return regUse_[physReg] == 0;
  }

  void backUpRegUses() {
    regUseBackUp_ = regUse_;
  }

  void restoreRegUses() {
    regUse_ = regUseBackUp_;
  }
  /// Register handling helpers.

  /// getFreePhysReg - return a free physical register for this virtual
  /// register interval if we have one, otherwise return 0.
  unsigned getFreePhysReg(LiveInterval* cur);
  unsigned getFreePhysReg(LiveInterval* cur,
                          const TargetRegisterClass *RC,
                          unsigned MaxInactiveCount,
                          SmallVector<unsigned, 256> &inactiveCounts,
                          bool SkipDGRegs);

  void ComputeRelatedRegClasses();
  template <typename ItTy>
  void printIntervals(const char* const str, ItTy i, ItTy e) const {
    if (str)
      dbgs() << str << " intervals:\n";

    for (; i != e; ++i) {
      dbgs() << "\t" << *i->first << " -> ";

      unsigned reg = i->first->reg;
      if (TargetRegisterInfo::isVirtualRegister(reg))
        reg = vrm_->getPhys(reg);

      dbgs() << tri_->getName(reg) << '\n';
    }
  }
char RALinScan::ID = 0;

static RegisterPass<RALinScan>
X("linearscan-regalloc", "Linear Scan Register Allocator");
void RALinScan::ComputeRelatedRegClasses() {
  // First pass, add all reg classes to the union, and determine at least one
  // reg class that each register is in.
  bool HasAliases = false;
  for (TargetRegisterInfo::regclass_iterator RCI = tri_->regclass_begin(),
       E = tri_->regclass_end(); RCI != E; ++RCI) {
    RelatedRegClasses.insert(*RCI);
    for (TargetRegisterClass::iterator I = (*RCI)->begin(), E = (*RCI)->end();
         I != E; ++I) {
      HasAliases = HasAliases || *tri_->getAliasSet(*I) != 0;

      const TargetRegisterClass *&PRC = OneClassForEachPhysReg[*I];
      if (PRC) {
        // Already processed this register. Just make sure we know that
        // multiple register classes share a register.
        RelatedRegClasses.unionSets(PRC, *RCI);
      } else {
        PRC = *RCI;
      }
    }
  }

  // Second pass, now that we know conservatively what register classes each
  // reg belongs to, add info about aliases. We don't need to do this for
  // targets without register aliases.
  if (HasAliases)
    for (DenseMap<unsigned, const TargetRegisterClass*>::iterator
           I = OneClassForEachPhysReg.begin(), E = OneClassForEachPhysReg.end();
         I != E; ++I)
      for (const unsigned *AS = tri_->getAliasSet(I->first); *AS; ++AS)
        RelatedRegClasses.unionSets(I->second, OneClassForEachPhysReg[*AS]);
}
/// attemptTrivialCoalescing - If a simple interval is defined by a copy, try
/// to allocate the definition the same register as the source register if the
/// register is not defined during the live time of the interval. If the
/// interval is killed by a copy, try to use the destination register. This
/// eliminates a copy. This is used to coalesce copies which were not coalesced
/// away before allocation either due to dest and src being in different
/// register classes or because the coalescer was overly conservative.
unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
  unsigned Preference = vrm_->getRegAllocPref(cur.reg);
  if ((Preference && Preference == Reg) || !cur.containsOneValue())
    return Reg;

  // We cannot handle complicated live ranges. Simple linear stuff only.
  if (cur.ranges.size() != 1)
    return Reg;

  const LiveRange &range = cur.ranges.front();
  VNInfo *vni = range.valno;

  unsigned CandReg;
  {
    MachineInstr *CopyMI;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (vni->def != SlotIndex() && vni->isDefAccurate() &&
        (CopyMI = li_->getInstructionFromIndex(vni->def)) &&
        (CopyMI->isCopy() ||
         tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)))
      // Defined by a copy, try to extend SrcReg forward
      CandReg = CopyMI->isCopy() ? CopyMI->getOperand(1).getReg() : SrcReg;
    else if (TrivCoalesceEnds &&
             (CopyMI =
              li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
             tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
             cur.reg == SrcReg)
      // Only used by a copy, try to extend DstReg backwards
      CandReg = DstReg;
    else
      return Reg;
  }

  if (TargetRegisterInfo::isVirtualRegister(CandReg)) {
    if (!vrm_->isAssignedReg(CandReg))
      return Reg;
    CandReg = vrm_->getPhys(CandReg);
  }

  const TargetRegisterClass *RC = mri_->getRegClass(cur.reg);
  if (!RC->contains(CandReg))
    return Reg;

  if (li_->conflictsWithPhysReg(cur, *vrm_, CandReg))
    return Reg;

  DEBUG(dbgs() << "Coalescing: " << cur << " -> " << tri_->getName(CandReg)
               << '\n');
  vrm_->clearVirt(cur.reg);
  vrm_->assignVirt2Phys(cur.reg, CandReg);
  ++NumCoalesce;
  return CandReg;
}
bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &fn.getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  li_ = &getAnalysis<LiveIntervals>();
  ls_ = &getAnalysis<LiveStacks>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  // We don't run the coalescer here because we have no reason to
  // interact with it. If the coalescer requires interaction, it
  // won't do anything. If it doesn't require interaction, we assume
  // it was run as a separate pass.

  // If this is the first function compiled, compute the related reg classes.
  if (RelatedRegClasses.empty())
    ComputeRelatedRegClasses();

  // Also resize register usage trackers.
  initRegUses();

  vrm_ = &getAnalysis<VirtRegMap>();
  if (!rewriter_.get()) rewriter_.reset(createVirtRegRewriter());

  spiller_.reset(createSpiller(mf_, li_, loopInfo, vrm_));

  initIntervalSets();

  linearScan();

  // Rewrite spill code and update the PhysRegsUsed set.
  rewriter_->runOnMachineFunction(*mf_, *vrm_, li_);

  assert(unhandled_.empty() && "Unhandled live intervals remain!");

  finalizeRegUses();

  fixed_.clear();
  active_.clear();
  inactive_.clear();
  handled_.clear();
  NextReloadMap.clear();
  DowngradedRegs.clear();
  DowngradeMap.clear();

  return true;
}
/// initIntervalSets - initialize the interval sets.
void RALinScan::initIntervalSets() {
  assert(unhandled_.empty() && fixed_.empty() &&
         active_.empty() && inactive_.empty() &&
         "interval sets should be empty on initialization");

  handled_.reserve(li_->getNumIntervals());

  for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
    if (TargetRegisterInfo::isPhysicalRegister(i->second->reg)) {
      if (!i->second->empty()) {
        mri_->setPhysRegUsed(i->second->reg);
        fixed_.push_back(std::make_pair(i->second, i->second->begin()));
      }
    } else {
      if (i->second->empty()) {
        assignRegOrStackSlotAtInterval(i->second);
      } else
        unhandled_.push(i->second);
    }
  }
}
void RALinScan::linearScan() {
  // linear scan algorithm
  dbgs() << "********** LINEAR SCAN **********\n"
         << "********** Function: "
         << mf_->getFunction()->getName() << '\n';
  printIntervals("fixed", fixed_.begin(), fixed_.end());

  while (!unhandled_.empty()) {
    // pick the interval with the earliest start point
    LiveInterval* cur = unhandled_.top();
    unhandled_.pop();
    ++NumIters;
    DEBUG(dbgs() << "\n*** CURRENT ***: " << *cur << '\n');

    assert(!cur->empty() && "Empty interval in unhandled set.");

    processActiveIntervals(cur->beginIndex());
    processInactiveIntervals(cur->beginIndex());

    assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
           "Can only allocate virtual registers!");

    // Allocating a virtual register. Try to find a free
    // physical register or spill an interval (possibly this one) in order to
    // assign it one.
    assignRegOrStackSlotAtInterval(cur);

    printIntervals("active", active_.begin(), active_.end());
    printIntervals("inactive", inactive_.begin(), inactive_.end());
  }
  // Expire any remaining active intervals
  while (!active_.empty()) {
    IntervalPtr &IP = active_.back();
    unsigned reg = IP.first->reg;
    DEBUG(dbgs() << "\tinterval " << *IP.first << " expired\n");
    assert(TargetRegisterInfo::isVirtualRegister(reg) &&
           "Can only allocate virtual registers!");
    reg = vrm_->getPhys(reg);
    delRegUse(reg);
    active_.pop_back();
  }

  // Expire any remaining inactive intervals
  for (IntervalPtrs::reverse_iterator
         i = inactive_.rbegin(); i != inactive_.rend(); ++i)
    dbgs() << "\tinterval " << *i->first << " expired\n";
  inactive_.clear();
  // Add live-ins to every BB except for entry. Also perform trivial coalescing.
  MachineFunction::iterator EntryMBB = mf_->begin();
  SmallVector<MachineBasicBlock*, 8> LiveInMBBs;
  for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
    LiveInterval &cur = *i->second;
    unsigned Reg = 0;
    bool isPhys = TargetRegisterInfo::isPhysicalRegister(cur.reg);
    if (isPhys)
      Reg = cur.reg;
    else if (vrm_->isAssignedReg(cur.reg))
      Reg = attemptTrivialCoalescing(cur, vrm_->getPhys(cur.reg));
    if (!Reg)
      continue;

    // Ignore split live intervals.
    if (!isPhys && vrm_->getPreSplitReg(cur.reg))
      continue;

    for (LiveInterval::Ranges::const_iterator I = cur.begin(), E = cur.end();
         I != E; ++I) {
      const LiveRange &LR = *I;
      if (li_->findLiveInMBBs(LR.start, LR.end, LiveInMBBs)) {
        for (unsigned i = 0, e = LiveInMBBs.size(); i != e; ++i)
          if (LiveInMBBs[i] != EntryMBB) {
            assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
                   "Adding a virtual register to livein set?");
            LiveInMBBs[i]->addLiveIn(Reg);
          }
        LiveInMBBs.clear();
      }
    }
  }

  DEBUG(dbgs() << *vrm_);

  // Look for physical registers that end up not being allocated even though
  // register allocator had to spill other registers in its register class.
  if (ls_->getNumIntervals() == 0)
    return;
  if (!vrm_->FindUnusedRegisters(li_))
    return;
}
/// processActiveIntervals - expire old intervals and move non-overlapping ones
/// to the inactive list.
void RALinScan::processActiveIntervals(SlotIndex CurPoint) {
  DEBUG(dbgs() << "\tprocessing active intervals:\n");

  for (unsigned i = 0, e = active_.size(); i != e; ++i) {
    LiveInterval *Interval = active_[i].first;
    LiveInterval::iterator IntervalPos = active_[i].second;
    unsigned reg = Interval->reg;

    IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);

    if (IntervalPos == Interval->end()) {     // Remove expired intervals.
      DEBUG(dbgs() << "\t\tinterval " << *Interval << " expired\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      delRegUse(reg);

      // Pop off the end of the list.
      active_[i] = active_.back();
      active_.pop_back();
      --i; --e;
    } else if (IntervalPos->start > CurPoint) {
      // Move inactive intervals to inactive list.
      DEBUG(dbgs() << "\t\tinterval " << *Interval << " inactive\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      delRegUse(reg);
      inactive_.push_back(std::make_pair(Interval, IntervalPos));

      // Pop off the end of the list.
      active_[i] = active_.back();
      active_.pop_back();
      --i; --e;
    } else {
      // Otherwise, just update the iterator position.
      active_[i].second = IntervalPos;
    }
  }
}
/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
void RALinScan::processInactiveIntervals(SlotIndex CurPoint) {
  DEBUG(dbgs() << "\tprocessing inactive intervals:\n");

  for (unsigned i = 0, e = inactive_.size(); i != e; ++i) {
    LiveInterval *Interval = inactive_[i].first;
    LiveInterval::iterator IntervalPos = inactive_[i].second;
    unsigned reg = Interval->reg;

    IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);

    if (IntervalPos == Interval->end()) {       // remove expired intervals.
      DEBUG(dbgs() << "\t\tinterval " << *Interval << " expired\n");

      // Pop off the end of the list.
      inactive_[i] = inactive_.back();
      inactive_.pop_back();
      --i; --e;
    } else if (IntervalPos->start <= CurPoint) {
      // move re-activated intervals in active list
      DEBUG(dbgs() << "\t\tinterval " << *Interval << " active\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      addRegUse(reg);
      active_.push_back(std::make_pair(Interval, IntervalPos));

      // Pop off the end of the list.
      inactive_[i] = inactive_.back();
      inactive_.pop_back();
      --i; --e;
    } else {
      // Otherwise, just update the iterator position.
      inactive_[i].second = IntervalPos;
    }
  }
}
/// updateSpillWeights - updates the spill weight of the specified physical
/// register and its aliases.
void RALinScan::updateSpillWeights(std::vector<float> &Weights,
                                   unsigned reg, float weight,
                                   const TargetRegisterClass *RC) {
  SmallSet<unsigned, 4> Processed;
  SmallSet<unsigned, 4> SuperAdded;
  SmallVector<unsigned, 4> Supers;
  Weights[reg] += weight;
  Processed.insert(reg);
  for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as) {
    Weights[*as] += weight;
    Processed.insert(*as);
    if (tri_->isSubRegister(*as, reg) &&
        SuperAdded.insert(*as) &&
        RC->contains(*as)) {
      Supers.push_back(*as);
    }
  }

  // If the alias is a super-register, and the super-register is in the
  // register class we are trying to allocate, then add the weight to all
  // sub-registers of the super-register, even if they are not aliases.
  // e.g. allocating for GR32, bh is not used, updating bl spill weight.
  // bl should get the same spill weight otherwise it will be chosen
  // as a spill candidate since spilling bh doesn't make ebx available.
  for (unsigned i = 0, e = Supers.size(); i != e; ++i) {
    for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
      if (!Processed.count(*sr))
        Weights[*sr] += weight;
  }
}
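/// FindIntervalInVector - Return an iterator to the (interval, position) pair
/// for LI in IP, or IP.end() if LI is not present in the vector.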
static RALinScan::IntervalPtrs::iterator
FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
  for (RALinScan::IntervalPtrs::iterator I = IP.begin(), E = IP.end();
       I != E; ++I)
    if (I->first == LI) return I;
  return IP.end();
}
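/// RevertVectorIteratorsTo - After the allocator backtracks, rewind every
/// cached range iterator in V so it again points at the range containing
/// (or preceding) Point.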
static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V,
                                    SlotIndex Point) {
  for (unsigned i = 0, e = V.size(); i != e; ++i) {
    RALinScan::IntervalPtr &IP = V[i];
    LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
                                                IP.first->end(), Point);
    if (I != IP.first->begin()) --I;
    IP.second = I;
  }
}
/// addStackInterval - Create a LiveInterval for stack if the specified live
/// interval has been spilled.
static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
                             LiveIntervals *li_,
                             MachineRegisterInfo* mri_, VirtRegMap &vrm_) {
  int SS = vrm_.getStackSlot(cur->reg);
  if (SS == VirtRegMap::NO_STACK_SLOT)
    return;

  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
  LiveInterval &SI = ls_->getOrCreateInterval(SS, RC);

  VNInfo *VNI;
  if (SI.hasAtLeastOneValue())
    VNI = SI.getValNumInfo(0);
  else
    VNI = SI.getNextValue(SlotIndex(), 0, false,
                          ls_->getVNInfoAllocator());

  LiveInterval &RI = li_->getInterval(cur->reg);
  // FIXME: This may be overly conservative.
  SI.MergeRangesInAsValue(RI, VNI);
}
/// getConflictWeight - Return the number of conflicts between cur
/// live interval and defs and uses of Reg weighted by loop depth.
static
float getConflictWeight(LiveInterval *cur, unsigned Reg, LiveIntervals *li_,
                        MachineRegisterInfo *mri_,
                        const MachineLoopInfo *loopInfo) {
  float Conflicts = 0;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(Reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineInstr *MI = &*I;
    if (cur->liveAt(li_->getInstructionIndex(MI))) {
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      Conflicts += std::pow(10.0f, (float)loopDepth);
    }
  }
  return Conflicts;
}
/// findIntervalsToSpill - Determine the intervals to spill for the
/// specified interval. It's passed the physical registers whose spill
/// weight is the lowest among all the registers whose live intervals
/// conflict with the interval.
void RALinScan::findIntervalsToSpill(LiveInterval *cur,
                            std::vector<std::pair<unsigned,float> > &Candidates,
                            unsigned NumCands,
                            SmallVector<LiveInterval*, 8> &SpillIntervals) {
  // We have figured out the *best* register to spill. But there are other
  // registers that are pretty good as well (spill weight within 3%). Spill
  // the one that has fewest defs and uses that conflict with cur.
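  // Conflicts[j] accumulates the loop-depth-weighted number of conflicting
  // uses for candidate j, and SLIs[j] collects the live intervals that would
  // have to be spilled if candidate j were chosen.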
  float Conflicts[3] = { 0.0f, 0.0f, 0.0f };
  SmallVector<LiveInterval*, 8> SLIs[3];

  dbgs() << "\tConsidering " << NumCands << " candidates: ";
  for (unsigned i = 0; i != NumCands; ++i)
    dbgs() << tri_->getName(Candidates[i].first) << " ";

  // Calculate the number of conflicts of each candidate.
  for (IntervalPtrs::iterator i = active_.begin(); i != active_.end(); ++i) {
    unsigned Reg = i->first->reg;
    unsigned PhysReg = vrm_->getPhys(Reg);
    if (!cur->overlapsFrom(*i->first, i->second))
      continue;
    for (unsigned j = 0; j < NumCands; ++j) {
      unsigned Candidate = Candidates[j].first;
      if (tri_->regsOverlap(PhysReg, Candidate)) {
        if (NumCands > 1)
          Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
        SLIs[j].push_back(i->first);
      }
    }
  }

  for (IntervalPtrs::iterator i = inactive_.begin(); i != inactive_.end(); ++i){
    unsigned Reg = i->first->reg;
    unsigned PhysReg = vrm_->getPhys(Reg);
    if (!cur->overlapsFrom(*i->first, i->second-1))
      continue;
    for (unsigned j = 0; j < NumCands; ++j) {
      unsigned Candidate = Candidates[j].first;
      if (tri_->regsOverlap(PhysReg, Candidate)) {
        if (NumCands > 1)
          Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
        SLIs[j].push_back(i->first);
      }
    }
  }

  // Which is the best candidate?
  unsigned BestCandidate = 0;
  float MinConflicts = Conflicts[0];
  for (unsigned i = 1; i != NumCands; ++i) {
    if (Conflicts[i] < MinConflicts) {
      BestCandidate = i;
      MinConflicts = Conflicts[i];
    }
  }

  std::copy(SLIs[BestCandidate].begin(), SLIs[BestCandidate].end(),
            std::back_inserter(SpillIntervals));
}
struct WeightCompare {
  const RALinScan &Allocator;

  WeightCompare(const RALinScan &Alloc) : Allocator(Alloc) {}

  typedef std::pair<unsigned, float> RegWeightPair;
  bool operator()(const RegWeightPair &LHS, const RegWeightPair &RHS) const {
    return LHS.second < RHS.second && !Allocator.isRecentlyUsed(LHS.first);
  }
};
static bool weightsAreClose(float w1, float w2) {
  if (!NewHeuristic)
    return false;
  float diff = w1 - w2;
  if (diff <= 0.02f)  // Within 0.02f
    return true;
  return (diff / w2) <= 0.05f;  // Within 5%.
}

LiveInterval *RALinScan::hasNextReloadInterval(LiveInterval *cur) {
  DenseMap<unsigned, unsigned>::iterator I = NextReloadMap.find(cur->reg);
  if (I == NextReloadMap.end())
    return 0;
  return &li_->getInterval(I->second);
}
void RALinScan::DowngradeRegister(LiveInterval *li, unsigned Reg) {
  bool isNew = DowngradedRegs.insert(Reg);
  isNew = isNew; // Silence compiler warning.
  assert(isNew && "Multiple reloads holding the same register?");
  DowngradeMap.insert(std::make_pair(li->reg, Reg));
  for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS) {
    isNew = DowngradedRegs.insert(*AS);
    isNew = isNew; // Silence compiler warning.
    assert(isNew && "Multiple reloads holding the same register?");
    DowngradeMap.insert(std::make_pair(li->reg, *AS));
  }
  ++NumDowngrade;
}

void RALinScan::UpgradeRegister(unsigned Reg) {
  if (Reg) {
    DowngradedRegs.erase(Reg);
    for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS)
      DowngradedRegs.erase(*AS);
  }
}

struct LISorter {
  bool operator()(LiveInterval* A, LiveInterval* B) {
    return A->beginIndex() < B->beginIndex();
  }
};
/// assignRegOrStackSlotAtInterval - assign a register if one is available,
/// or spill.
void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
  DEBUG(dbgs() << "\tallocating current interval: ");

  // This is an implicitly defined live interval, just assign any register.
  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
  if (cur->empty()) {
    unsigned physReg = vrm_->getRegAllocPref(cur->reg);
    if (!physReg)
      physReg = *RC->allocation_order_begin(*mf_);
    DEBUG(dbgs() << tri_->getName(physReg) << '\n');
    // Note the register is not really in use.
    vrm_->assignVirt2Phys(cur->reg, physReg);
    return;
  }
  std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
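  // SpillWeightsToAdd collects (physreg, weight) pairs for registers that
  // conflict with cur; they are only folded into the full SpillWeights table
  // later, if no free register can be found.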
  SlotIndex StartPosition = cur->beginIndex();
  const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);

  // If start of this live interval is defined by a move instruction and its
  // source is assigned a physical register that is compatible with the target
  // register class, then we should try to assign it the same register.
  // This can happen when the move is from a larger register class to a smaller
  // one, e.g. X86::mov32to32_. These move instructions are not coalescable.
  if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
    VNInfo *vni = cur->begin()->valno;
    if ((vni->def != SlotIndex()) && !vni->isUnused() &&
        vni->isDefAccurate()) {
      MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (CopyMI &&
          tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
        unsigned Reg = 0;
        if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
          Reg = SrcReg;
        else if (vrm_->isAssignedReg(SrcReg))
          Reg = vrm_->getPhys(SrcReg);
        if (Reg) {
          if (SrcSubReg)
            Reg = tri_->getSubReg(Reg, SrcSubReg);
          if (DstSubReg)
            Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
          if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
            mri_->setRegAllocationHint(cur->reg, 0, Reg);
        }
      } else if (CopyMI && CopyMI->isCopy()) {
        DstReg = CopyMI->getOperand(0).getReg();
        DstSubReg = CopyMI->getOperand(0).getSubReg();
        SrcReg = CopyMI->getOperand(1).getReg();
        SrcSubReg = CopyMI->getOperand(1).getSubReg();
        unsigned Reg = 0;
        if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
          Reg = SrcReg;
        else if (vrm_->isAssignedReg(SrcReg))
          Reg = vrm_->getPhys(SrcReg);
        if (Reg) {
          if (SrcSubReg)
            Reg = tri_->getSubReg(Reg, SrcSubReg);
          if (DstSubReg)
            Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
          if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
            mri_->setRegAllocationHint(cur->reg, 0, Reg);
        }
      }
    }
  }
  // For every interval in inactive we overlap with, mark the
  // register as not free and update spill weights.
  for (IntervalPtrs::const_iterator i = inactive_.begin(),
         e = inactive_.end(); i != e; ++i) {
    unsigned Reg = i->first->reg;
    assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
           "Can only allocate virtual registers!");
    const TargetRegisterClass *RegRC = mri_->getRegClass(Reg);
    // If this is not in a related reg class to the register we're allocating,
    // don't check it.
    if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
        cur->overlapsFrom(*i->first, i->second-1)) {
      Reg = vrm_->getPhys(Reg);
      addRegUse(Reg);
      SpillWeightsToAdd.push_back(std::make_pair(Reg, i->first->weight));
    }
  }
  // Speculatively check to see if we can get a register right now. If not,
  // we know we won't be able to by adding more constraints. If so, we can
  // check to see if it is valid. Doing an exhaustive search of the fixed_ list
  // is very bad (it contains all callee clobbered registers for any functions
  // with a call), so we want to avoid doing that if possible.
  unsigned physReg = getFreePhysReg(cur);
  unsigned BestPhysReg = physReg;
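  // Remember the speculative pick; if every spill candidate turns out to have
  // infinite weight, we fall back to BestPhysReg below.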
  if (physReg) {
    // We got a register. However, if it's in the fixed_ list, we might
    // conflict with it. Check to see if we conflict with it or any of its
    // aliases.
    SmallSet<unsigned, 8> RegAliases;
    for (const unsigned *AS = tri_->getAliasSet(physReg); *AS; ++AS)
      RegAliases.insert(*AS);
    bool ConflictsWithFixed = false;
    for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
      IntervalPtr &IP = fixed_[i];
      if (physReg == IP.first->reg || RegAliases.count(IP.first->reg)) {
        // Okay, this reg is on the fixed list. Check to see if we actually
        // conflict.
        LiveInterval *I = IP.first;
        if (I->endIndex() > StartPosition) {
          LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
          IP.second = II;
          if (II != I->begin() && II->start > StartPosition)
            --II;
          if (cur->overlapsFrom(*I, II)) {
            ConflictsWithFixed = true;
            break;
          }
        }
      }
    }
    // Okay, the register picked by our speculative getFreePhysReg call turned
    // out to be in use. Actually add all of the conflicting fixed registers to
    // regUse_ so we can do an accurate query.
    if (ConflictsWithFixed) {
      // For every interval in fixed we overlap with, mark the register as not
      // free and update spill weights.
      for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
        IntervalPtr &IP = fixed_[i];
        LiveInterval *I = IP.first;

        const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
        if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
            I->endIndex() > StartPosition) {
          LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
          IP.second = II;
          if (II != I->begin() && II->start > StartPosition)
            --II;
          if (cur->overlapsFrom(*I, II)) {
            unsigned reg = I->reg;
            addRegUse(reg);
            SpillWeightsToAdd.push_back(std::make_pair(reg, I->weight));
          }
        }
      }

      // Using the newly updated regUse_ object, which includes conflicts in the
      // future, see if there are any registers available.
      physReg = getFreePhysReg(cur);
    }
  }
  // Restore the physical register tracker, removing information about the
  // future.
  restoreRegUses();

  // If we find a free register, we are done: assign this virtual to
  // the free physical register and add this interval to the active
  // list.
  if (physReg) {
    DEBUG(dbgs() << tri_->getName(physReg) << '\n');
    vrm_->assignVirt2Phys(cur->reg, physReg);
    addRegUse(physReg);
    active_.push_back(std::make_pair(cur, cur->begin()));
    handled_.push_back(cur);

    // "Upgrade" the physical register since it has been allocated.
    UpgradeRegister(physReg);
    if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
      // "Downgrade" physReg to try to keep physReg from being allocated until
      // the next reload from the same SS is allocated.
      mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
      DowngradeRegister(cur, physReg);
    }
    return;
  }

  DEBUG(dbgs() << "no free registers\n");
  // Compile the spill weights into an array that is better for scanning.
  std::vector<float> SpillWeights(tri_->getNumRegs(), 0.0f);
  for (std::vector<std::pair<unsigned, float> >::iterator
       I = SpillWeightsToAdd.begin(), E = SpillWeightsToAdd.end(); I != E; ++I)
    updateSpillWeights(SpillWeights, I->first, I->second, RC);

  // For each interval in active, update spill weights.
  for (IntervalPtrs::const_iterator i = active_.begin(), e = active_.end();
       i != e; ++i) {
    unsigned reg = i->first->reg;
    assert(TargetRegisterInfo::isVirtualRegister(reg) &&
           "Can only allocate virtual registers!");
    reg = vrm_->getPhys(reg);
    updateSpillWeights(SpillWeights, reg, i->first->weight, RC);
  }
  DEBUG(dbgs() << "\tassigning stack slot at interval "<< *cur << ":\n");

  // Find a register to spill.
  float minWeight = HUGE_VALF;
  unsigned minReg = 0;
  bool Found = false;

  std::vector<std::pair<unsigned,float> > RegsWeights;
  if (!minReg || SpillWeights[minReg] == HUGE_VALF)
    for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
           e = RC->allocation_order_end(*mf_); i != e; ++i) {
      unsigned reg = *i;
      float regWeight = SpillWeights[reg];
      // Skip recently allocated registers.
      if (minWeight > regWeight && !isRecentlyUsed(reg))
        Found = true;
      RegsWeights.push_back(std::make_pair(reg, regWeight));
    }

  // If we didn't find a register that is spillable, try aliases?
  if (!Found)
    for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
           e = RC->allocation_order_end(*mf_); i != e; ++i) {
      unsigned reg = *i;
      // No need to worry about whether the alias register size < regsize of RC.
      // We are going to spill all registers that alias it anyway.
      for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as)
        RegsWeights.push_back(std::make_pair(*as, SpillWeights[*as]));
    }

  // Sort all potential spill candidates by weight.
  std::sort(RegsWeights.begin(), RegsWeights.end(), WeightCompare(*this));
  minReg = RegsWeights[0].first;
  minWeight = RegsWeights[0].second;
  if (minWeight == HUGE_VALF) {
    // All registers must have inf weight. Just grab one!
    minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
    if (cur->weight == HUGE_VALF ||
        li_->getApproximateInstructionCount(*cur) == 0) {
      // Spill a physical register around defs and uses.
      if (li_->spillPhysRegAroundRegDefsUses(*cur, minReg, *vrm_)) {
        // spillPhysRegAroundRegDefsUses may have invalidated iterator stored
        // in fixed_. Reset them.
        for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
          IntervalPtr &IP = fixed_[i];
          LiveInterval *I = IP.first;
          if (I->reg == minReg || tri_->isSubRegister(minReg, I->reg))
            IP.second = I->advanceTo(I->begin(), StartPosition);
        }

        DowngradedRegs.clear();
        assignRegOrStackSlotAtInterval(cur);
      } else {
        assert(false && "Ran out of registers during register allocation!");
        report_fatal_error("Ran out of registers during register allocation!");
      }
      return;
    }
  }
  // Find up to 3 registers to consider as spill candidates.
  unsigned LastCandidate = RegsWeights.size() >= 3 ? 3 : 1;
  while (LastCandidate > 1) {
    if (weightsAreClose(RegsWeights[LastCandidate-1].second, minWeight))
      break;
    --LastCandidate;
  }

  dbgs() << "\t\tregister(s) with min weight(s): ";
  for (unsigned i = 0; i != LastCandidate; ++i)
    dbgs() << tri_->getName(RegsWeights[i].first)
           << " (" << RegsWeights[i].second << ")\n";
  // If the current has the minimum weight, we need to spill it and
  // add any added intervals back to unhandled, and restart
  // the linear scan algorithm.
  if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
    DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
    SmallVector<LiveInterval*, 8> spillIs;
    std::vector<LiveInterval*> added;
    spiller_->spill(cur, added, spillIs);

    std::sort(added.begin(), added.end(), LISorter());
    addStackInterval(cur, ls_, li_, mri_, *vrm_);
    if (added.empty())
      return; // Early exit if all spills were folded.

    // Merge added with unhandled. Note that we have already sorted
    // intervals returned by addIntervalsForSpills by their starting
    // point.
    // This also updates the NextReloadMap. That is, it adds mapping from a
    // register defined by a reload from SS to the next reload from SS in the
    // same basic block.
    MachineBasicBlock *LastReloadMBB = 0;
    LiveInterval *LastReload = 0;
    int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
    for (unsigned i = 0, e = added.size(); i != e; ++i) {
      LiveInterval *ReloadLi = added[i];
      if (ReloadLi->weight == HUGE_VALF &&
          li_->getApproximateInstructionCount(*ReloadLi) == 0) {
        SlotIndex ReloadIdx = ReloadLi->beginIndex();
        MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
        int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
        if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
          // Last reload of same SS is in the same MBB. We want to try to
          // allocate both reloads the same register and make sure the reg
          // isn't clobbered in between if at all possible.
          assert(LastReload->beginIndex() < ReloadIdx);
          NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
        }
        LastReloadMBB = ReloadMBB;
        LastReload = ReloadLi;
        LastReloadSS = ReloadSS;
      }
      unhandled_.push(ReloadLi);
    }
    return;
  }

  ++NumBacktracks;

  // Push the current interval back to unhandled since we are going
  // to re-run at least this iteration. Since we didn't modify it, it
  // should go back right in the front of the list.
  unhandled_.push(cur);
  assert(TargetRegisterInfo::isPhysicalRegister(minReg) &&
         "did not choose a register to spill?");

  // We spill all intervals aliasing the register with
  // minimum weight, roll back to the interval with the earliest
  // start point and let the linear scan algorithm run again.
  SmallVector<LiveInterval*, 8> spillIs;

  // Determine which intervals have to be spilled.
  findIntervalsToSpill(cur, RegsWeights, LastCandidate, spillIs);

  // Set of spilled vregs (used later to roll back properly)
  SmallSet<unsigned, 8> spilled;

  // The earliest start of a spilled interval indicates up to where
  // in handled_ we need to roll back.
  assert(!spillIs.empty() && "No spill intervals?");
  SlotIndex earliestStart = spillIs[0]->beginIndex();

  // Spill live intervals of virtual regs mapped to the physical register we
  // want to clear (and its aliases). We only spill those that overlap with the
  // current interval as the rest do not affect its allocation. We also keep
  // track of the earliest start of all spilled live intervals since this will
  // mark our rollback point.
  std::vector<LiveInterval*> added;
  while (!spillIs.empty()) {
    LiveInterval *sli = spillIs.back();
    spillIs.pop_back();
    DEBUG(dbgs() << "\t\t\tspilling(a): " << *sli << '\n');
    if (sli->beginIndex() < earliestStart)
      earliestStart = sli->beginIndex();
    spiller_->spill(sli, added, spillIs, &earliestStart);
    addStackInterval(sli, ls_, li_, mri_, *vrm_);
    spilled.insert(sli->reg);
  }
  DEBUG(dbgs() << "\t\trolling back to: " << earliestStart << '\n');

  // Scan handled in reverse order up to the earliest start of a
  // spilled live interval and undo each one, restoring the state of
  // the used registers.
  while (!handled_.empty()) {
    LiveInterval* i = handled_.back();
    // If this interval starts before earliestStart we are done.
    if (!i->empty() && i->beginIndex() < earliestStart)
      break;
    DEBUG(dbgs() << "\t\t\tundo changes for: " << *i << '\n');
    handled_.pop_back();

    // When undoing a live interval allocation we must know if it is active or
    // inactive to properly update regUse_ and the VirtRegMap.
    IntervalPtrs::iterator it;
    if ((it = FindIntervalInVector(active_, i)) != active_.end()) {
      active_.erase(it);
      assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
      if (!spilled.count(i->reg))
        unhandled_.push(i);
      delRegUse(vrm_->getPhys(i->reg));
      vrm_->clearVirt(i->reg);
    } else if ((it = FindIntervalInVector(inactive_, i)) != inactive_.end()) {
      inactive_.erase(it);
      assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
      if (!spilled.count(i->reg))
        unhandled_.push(i);
      vrm_->clearVirt(i->reg);
    } else {
      assert(TargetRegisterInfo::isVirtualRegister(i->reg) &&
             "Can only allocate virtual registers!");
      vrm_->clearVirt(i->reg);
      unhandled_.push(i);
    }

    DenseMap<unsigned, unsigned>::iterator ii = DowngradeMap.find(i->reg);
    if (ii == DowngradeMap.end())
      // If the interval has a preference, it must be defined by a copy. Clear
      // the preference now since the source interval allocation may have been
      // undone as well.
      mri_->setRegAllocationHint(i->reg, 0, 0);
    else {
      UpgradeRegister(ii->second);
    }
  }
  // Rewind the iterators in the active, inactive, and fixed lists back to the
  // point we reverted to.
  RevertVectorIteratorsTo(active_, earliestStart);
  RevertVectorIteratorsTo(inactive_, earliestStart);
  RevertVectorIteratorsTo(fixed_, earliestStart);

  // Scan the rest and undo each interval that expired after earliestStart and
  // insert it in active (the next iteration of the algorithm will
  // put it in inactive if required).
  for (unsigned i = 0, e = handled_.size(); i != e; ++i) {
    LiveInterval *HI = handled_[i];
    if (!HI->expiredAt(earliestStart) &&
        HI->expiredAt(cur->beginIndex())) {
      DEBUG(dbgs() << "\t\t\tundo changes for: " << *HI << '\n');
      active_.push_back(std::make_pair(HI, HI->begin()));
      assert(!TargetRegisterInfo::isPhysicalRegister(HI->reg));
      addRegUse(vrm_->getPhys(HI->reg));
    }
  }
  // Merge added with unhandled.
  // This also updates the NextReloadMap. That is, it adds mapping from a
  // register defined by a reload from SS to the next reload from SS in the
  // same basic block.
  MachineBasicBlock *LastReloadMBB = 0;
  LiveInterval *LastReload = 0;
  int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
  std::sort(added.begin(), added.end(), LISorter());
  for (unsigned i = 0, e = added.size(); i != e; ++i) {
    LiveInterval *ReloadLi = added[i];
    if (ReloadLi->weight == HUGE_VALF &&
        li_->getApproximateInstructionCount(*ReloadLi) == 0) {
      SlotIndex ReloadIdx = ReloadLi->beginIndex();
      MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
      int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
      if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
        // Last reload of same SS is in the same MBB. We want to try to
        // allocate both reloads the same register and make sure the reg
        // isn't clobbered in between if at all possible.
        assert(LastReload->beginIndex() < ReloadIdx);
        NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
      }
      LastReloadMBB = ReloadMBB;
      LastReload = ReloadLi;
      LastReloadSS = ReloadSS;
    }
    unhandled_.push(ReloadLi);
  }
}
unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
                                   const TargetRegisterClass *RC,
                                   unsigned MaxInactiveCount,
                                   SmallVector<unsigned, 256> &inactiveCounts,
                                   bool SkipDGRegs) {
  unsigned FreeReg = 0;
  unsigned FreeRegInactiveCount = 0;
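  // FreeReg is the best free register seen so far and FreeRegInactiveCount is
  // the number of inactive intervals currently assigned to it; preferring the
  // free register with the most inactive users is the heuristic described in
  // the comment further down.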
  std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg);
  // Resolve second part of the hint (if possible) given the current allocation.
  unsigned physReg = Hint.second;
  if (physReg &&
      TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
    physReg = vrm_->getPhys(physReg);

  TargetRegisterClass::iterator I, E;
  tie(I, E) = tri_->getAllocationOrder(RC, Hint.first, physReg, *mf_);
  assert(I != E && "No allocatable register in this register class!");

  // Scan for the first available register.
  for (; I != E; ++I) {
    unsigned Reg = *I;
    // Ignore "downgraded" registers.
    if (SkipDGRegs && DowngradedRegs.count(Reg))
      continue;
    // Skip recently allocated registers.
    if (isRegAvail(Reg) && !isRecentlyUsed(Reg)) {
      FreeReg = Reg;
      if (FreeReg < inactiveCounts.size())
        FreeRegInactiveCount = inactiveCounts[FreeReg];
      else
        FreeRegInactiveCount = 0;
      break;
    }
  }

  // If there are no free regs, or if this reg has the max inactive count,
  // return this register.
  if (FreeReg == 0 || FreeRegInactiveCount == MaxInactiveCount) {
    // Remember what register we picked so we can skip it next time.
    if (FreeReg != 0) recordRecentlyUsed(FreeReg);
    return FreeReg;
  }

  // Continue scanning the registers, looking for the one with the highest
  // inactive count. Alkis found that this reduced register pressure very
  // slightly on X86 (in rev 1.94 of this file), though this should probably be
  // reevaluated now.
  for (; I != E; ++I) {
    unsigned Reg = *I;
    // Ignore "downgraded" registers.
    if (SkipDGRegs && DowngradedRegs.count(Reg))
      continue;
    if (isRegAvail(Reg) && Reg < inactiveCounts.size() &&
        FreeRegInactiveCount < inactiveCounts[Reg] && !isRecentlyUsed(Reg)) {
      FreeReg = Reg;
      FreeRegInactiveCount = inactiveCounts[Reg];
      if (FreeRegInactiveCount == MaxInactiveCount)
        break;    // We found the one with the max inactive count.
    }
  }

  // Remember what register we picked so we can skip it next time.
  recordRecentlyUsed(FreeReg);

  return FreeReg;
}
/// getFreePhysReg - return a free physical register for this virtual register
/// interval if we have one, otherwise return 0.
unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
  SmallVector<unsigned, 256> inactiveCounts;
  unsigned MaxInactiveCount = 0;
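  // inactiveCounts[PhysReg] is the number of inactive intervals of a related
  // register class currently assigned to PhysReg; it biases the search toward
  // registers that are already shared by inactive intervals.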
  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
  const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);

  for (IntervalPtrs::iterator i = inactive_.begin(), e = inactive_.end();
       i != e; ++i) {
    unsigned reg = i->first->reg;
    assert(TargetRegisterInfo::isVirtualRegister(reg) &&
           "Can only allocate virtual registers!");

    // If this is not in a related reg class to the register we're allocating,
    // don't check it.
    const TargetRegisterClass *RegRC = mri_->getRegClass(reg);
    if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader) {
      reg = vrm_->getPhys(reg);
      if (inactiveCounts.size() <= reg)
        inactiveCounts.resize(reg+1);
      ++inactiveCounts[reg];
      MaxInactiveCount = std::max(MaxInactiveCount, inactiveCounts[reg]);
    }
  }

  // If copy coalescer has assigned a "preferred" register, check if it's
  // available first.
  unsigned Preference = vrm_->getRegAllocPref(cur->reg);
  if (Preference) {
    DEBUG(dbgs() << "(preferred: " << tri_->getName(Preference) << ") ");
    if (isRegAvail(Preference) &&
        RC->contains(Preference))
      return Preference;
  }

  if (!DowngradedRegs.empty()) {
    unsigned FreeReg = getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts,
                                      true);
    if (FreeReg)
      return FreeReg;
  }
  return getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, false);
}
FunctionPass* llvm::createLinearScanRegisterAllocator() {
  return new RALinScan();
}