//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
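
// For illustration only: if the instructions of a function are numbered
// 4, 8, 12, ..., then a virtual register defined at 8 and last read at 20 is
// summarized by the half-open interval [8, 20). The endpoints used below are
// SlotIndexes, which further subdivide each instruction into
// load/use/def/store slots, but the half-open interval picture is the same.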

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

/// MultipleDefsBySameMI - Return true if the instruction defines the register
/// at MOIdx more than once, e.g. with a pair of explicit sub-register defs.
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0)
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  return false;
}
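
// For illustration: given a live range for %reg1041 created by
//   %reg1041:6<def> = ...
// a later instruction
//   %reg1041:5<def> = ...
// is a partial redef: it writes only a sub-register while the rest of
// %reg1041 stays live, so isPartialRedef returns true for the second def.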

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the instruction.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike())
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }
  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
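
// Illustrative example of the phi-join case above: if PHI elimination turned
//   %reg1030 = PHI %reg1028, <BB#1>, %reg1029, <BB#2>
// into copies into %reg1030 at the ends of BB#1 and BB#2, then %reg1030 is a
// phi-join vreg: each copy's value is killed at its block end, and the value
// live in the joining block gets a fresh value number whose def point is the
// block start.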

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
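
// For illustration: a physreg def with no subsequent reader in its block,
// e.g. a dead %EFLAGS<def>, gets the one-slot interval
// [defSlot(def), defSlot(def)+1) computed above, so it never constrains
// allocation beyond the defining instruction.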

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
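
// Worked example (illustrative only): for a single block
//   BB#0:  4   %reg1024<def> = MOV32ri 7
//          8   %EAX<def> = COPY %reg1024<kill>
//          12  RET %EAX<imp-use,kill>
// the walk above records intervals along the lines of %reg1024 = [4, 8)
// and %EAX = [8, 12), each with a single value number (the real endpoints
// are SlotIndexes with sub-slots rather than bare instruction numbers).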

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
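
// For illustration: when Ops names both operands of a two-address add such as
//   %reg1025<def> = ADD32ri %reg1025<tied-use>, 7
// the loop above reports MRInfo == VirtRegMap::isModRef but pushes only the
// def index onto FoldOps; the tied use is filtered out by the continue.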

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
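
// Illustrative example (assumed target behavior): on X86 a reload feeding an
// add,
//   %reg1026<def> = MOV32rm <fi#1>       ; reload from the spill slot
//   %reg1027<def> = ADD32rr %reg1028<kill>, %reg1026<kill>
// can be rewritten by foldMemoryOperand into the memory form
//   %reg1027<def> = ADD32rm %reg1028<kill>, <fi#1>
// eliminating the separate reload; each such success is counted by numFolds.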

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
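
// RewriteInfoCompare is the predicate handed to std::sort in
// rewriteInstructionsForSpills below, so the collected defs and uses are
// visited in increasing SlotIndex order:
//   std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());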

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                     unsigned vr, BitVector &RestoreMBBs,
                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                     unsigned vr, BitVector &RestoreMBBs,
                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
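
// Plugging numbers into the formula above: a def+use pair costs
//   depth 0: 2 * (1 + 100/10)^0  =  2.0
//   depth 1: 2 * (1 + 100/11)^1  ~= 20.2
//   depth 3: 2 * (1 + 100/13)^3  ~= 1313.5
// i.e. roughly a factor of 10 per loop level for small depths, as the
// comment promises.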

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
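
// For illustration (assumed register layout): asking for the representative
// of X86's %AX would typically return %RAX on x86-64, i.e. the widest
// super-register that has an interval but no allocatable super-register of
// its own, so overlap queries can be made against one physreg interval.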

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
      dbgs() << "Trying to spill:";
      for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
        dbgs() << ' ' << tri_->getName(PRegs[i]);
      dbgs() << '\n';
    });

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}