//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                      "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                    "Live Interval Analysis", false, false)
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
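  // Note: in this pass each instruction index carries four consecutive
  // slots (load, use, def, store). getBaseIndex() rounds an index down to
  // its instruction's first slot, so the two lines above widen the range to
  // whole-instruction boundaries: e.g. (illustrative) an end index inside
  // instruction 12 becomes the base index of instruction 13.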

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
/// MultipleDefsBySameMI - Return true if the instruction defines the register
/// it defines at operand MOIdx again in a later operand (e.g. two
/// sub-register defs of the same vreg by one instruction).
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
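/// For example (illustrative), a def of %reg1024:sub_16 that follows an
/// earlier def of all of %reg1024 redefines only part of the interval.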
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0)
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the call.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
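    // (Illustrative) a normal def at base index 8 starts its range at 8's
    // def slot; an early-clobber def instead starts at 8's use slot, so it
    // overlaps values read by the same instruction.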

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
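    // (Background note) a "phi join" vreg is one that PHI elimination
    // introduced to merge incoming values: every predecessor copies into it
    // and the successor block reads it, so its interval is stitched together
    // from per-block pieces below rather than one contiguous range.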

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
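  // (Illustrative) a dead def at base index 8 gets the unit range
  // [8.def, 8.store); for an early-clobber dead def, start was moved back
  // to 8.use, so the resulting range [8.use, 8.store) is one slot wider.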
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and the
/// variable is live over it.
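/// For example (illustrative): a vreg defined at instruction 4 and last
/// used at instruction 10 is live over [4, 11) in that ordering; a value
/// that crosses basic blocks is represented by one such range per block.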
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//===----------------------------------------------------------------------===//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
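/// A typical example (illustrative) is a rematerializable constant-pool
/// load whose address is computed from a PIC base register: every
/// rematerialized copy of the load implicitly keeps that base vreg live.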
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
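/// (Note) MRInfo is filled in as a bitmask of VirtRegMap::isMod and
/// VirtRegMap::isRef; for example, an instruction that both reads and
/// writes the register being folded ends up with VirtRegMap::isModRef.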
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
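  // Worked example (illustrative): at d=3 this is (1 + 100/13)^3, about
  // 657, versus 10^3 = 1000 for the naive formula; at d=0 it is exactly 1,
  // so a def+use outside any loop contributes a weight of 2 below.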

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
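/// For example (illustrative, target-dependent): on x86 the representative
/// register of AL would typically be its largest allocatable super-register
/// with an interval, such as EAX.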
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
      dbgs() << "Trying to spill:";
      for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
        dbgs() << ' ' << tri_->getName(PRegs[i]);
      dbgs() << '\n';
    });

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}