//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false);
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    " << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
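/// conflictsWithPhysReg - Return true if the specified physical register
/// 'reg' is used or defined anywhere within the (single-range) interval li,
/// taking the virtual register assignments in vrm into account. Copies to
/// and from li.reg itself are not counted as conflicts.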
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
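/// conflictsWithAliasRef - Return true if any instruction covered by this
/// live interval has an operand referring to a physical register that
/// aliases Reg. Instructions in JoinedCopies are skipped.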
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif
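/// MultipleDefsBySameMI - Return true if the instruction defines the virtual
/// register at operand MOIdx more than once, e.g. with several sub-register
/// defs left behind by REG_SEQUENCE elimination.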
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike())
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
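/// handlePhysicalRegisterDef - Update the live interval of the physical
/// register defined at MIIdx. A physical register's live range never leaves
/// its defining block, so this only scans forward to the kill or redef.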
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a kill or an
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
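/// handleRegisterDef - Dispatch a register definition to the virtual or
/// physical register handler above. For an allocatable physical register
/// def this also updates the intervals of its sub-registers.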
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
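/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB: from the block start to its first kill or redefinition,
/// or all the way through the block if neither occurs.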
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
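/// createInterval - Allocate a new LiveInterval for the given register.
/// Physical registers start out with an effectively infinite spill weight.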
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
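/// intervalIsInOneMBB - Returns true if every range of the given interval
/// is covered by a single machine basic block.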
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
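/// anyKillInMBBAfterIdx - Returns true if the live interval is killed in
/// the given basic block anywhere after the slot index Idx.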
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
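/// alsoFoldARestore - Returns true if the restore recorded for virtual
/// register vr at the given index in block Id is still marked foldable.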
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
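/// getSpillWeight - Compute the weight contributed by a def and/or use at
/// the given loop depth; deeper loops yield exponentially larger weights.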
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it is guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // There may be registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register; assert,
    // since we can't handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
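/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a live range from the def slot of that instruction to the end
/// of its basic block, marked as a PHI kill.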
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}