//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
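  // Note: "end" rounds the range's stop point, which may fall between the
  // slots of an instruction, up to the base index of the next instruction,
  // so the [idx, end) walk below only ever visits whole instructions.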
  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
/// MultipleDefsBySameMI - Return true if MI defines the register it defines
/// at operand MOIdx again in a later operand.
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0)
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the instruction.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }
  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
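  // For example (illustration only): a dead def at instruction index 8 gets
  // the one-slot range [defSlot(8), defSlot(8)+1), so the def itself is
  // recorded but no live span extends beyond it.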
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();
exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUEs at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUEs.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // A live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
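///
/// For example, with instructions numbered 1..4, a register defined at 2 and
/// last used at 4 is live over the interval [2, 4).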
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_defs (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
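// Intended usage (hypothetical caller sketch): the duplicate is heap-allocated
// by createInterval, so whoever requests it must free it, e.g.
//   LiveInterval *Tmp = lis.dupInterval(&LI);
//   ... inspect or mutate Tmp without touching LI ...
//   delete Tmp;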
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
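// How MRInfo comes out of FilterFoldedOps (derived from the logic above): a
// plain use contributes VirtRegMap::isRef and its index lands in FoldOps; a
// def contributes isMod; a tied (two-address) use sets MRInfo to isModRef but
// is filtered out of FoldOps, since the tied def operand already covers it.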
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
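// On success, note the side effects above: the original MI has been erased
// and the caller's MI reference now points at the folded instruction, with
// all the VirtRegMap bookkeeping (spill points, restore points, emergency
// spills) transferred over to it.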
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
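// Example scenario: li.reg is something like a PIC base register that a
// rematerialized instruction reads. The remat'ed clone recorded in the
// VirtRegMap still names li.reg, so that stale use is redirected to NewVReg
// here as well.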
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot.
      LiveRange LR(HasEarlyClobber ? index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent the
      // interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB,
                                         index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI =
            li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
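// Worked examples of the formula above: at loopDepth 0 the factor is
// (1 + 100/10)^0 = 1; at depth 1 it is (1 + 100/11)^1 ~= 10.1; at depth 3 it
// is (1 + 100/13)^3 ~= 657. So it tracks 10^d closely for shallow nests while
// staying finite for deep ones.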
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
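// For example, on X86 the 16-bit register AX has EAX (and RAX in 64-bit
// mode) as super-registers; if such a wider register is allocatable and has
// an interval, AX reports an allocatable super register. The exact register
// tree is target-dependent.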
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
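// Continuing the X86 illustration above: getRepresentativeReg(AX) walks the
// super-register chain and settles on the widest candidate with an interval
// that has no allocatable super register of its own.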
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not
    // sub-registers of the chosen representative super register, assert,
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}