//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
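///
/// A sketch of the intended transformation (register numbers invented for
/// illustration):
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024       ; a plain copy of the undefined value
/// becomes
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = IMPLICIT_DEF
/// and any remaining use of %reg1024 is marked with the isUndef flag.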
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register in this instruction
          // are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // real def.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def which we want to keep are those that are live
      // out of their defining block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));
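
  // An illustrative sketch of the resulting numbering (the exact slot layout
  // is owned by MachineInstrIndex; the four sub-slots per index and the
  // concrete numbers below are assumptions for illustration only):
  //
  //   idx  0:  <empty>           ; slot at the start of the block
  //   idx  4:  %r1 = ADD ...     ; LOAD/USE/DEF/STORE sub-slots 4..7
  //   idx  8:  <empty>           ; one empty index per def, minimum one
  //   idx 12:  <terminator gap>  ; PHI kills point at this gap
  //
  // The empty indexes leave room to insert spill / reload code later without
  // renumbering the whole function.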
  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = MIIndex.nextIndex();
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        (void) inserted; // Avoid compiler warning if assertions are disabled.
        i2miMap_.push_back(0);

        MIIndex = MIIndex.nextIndex();
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      (void) inserted;
      i2miMap_.push_back(I);
      MIIndex = MIIndex.nextIndex();

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = MIIndex.nextIndex();
        i2miMap_.push_back(0);
      }
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      (void) inserted; // Avoid compiler warning if assertions are disabled.
      i2miMap_.push_back(0);
      MIIndex = MIIndex.nextIndex();
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex.prevSlot());
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end.prevSlot()).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second).nextSlot();
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end =
              MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                (idx == index ? offset : MachineInstrIndex::LOAD));
          else
            LI->end =
              MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = vni->kills[i].prevSlot().getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert(false && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else if (vni->kills[i].isPHIIndex()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;
            vni->kills[i] = terminatorGaps[I->second];
          } else {
            assert(OldI2MI[index] != 0 &&
                   "Kill refers to instruction not present in index maps.");
            vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
          /* Old remapping code, kept commented out:
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                            (idx == index ? offset : 0);
          else
            vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          */
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings
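  //
  // For example, scaleNumbering(2) would turn indexes 0, 4, 8, ... into
  // 0, 8, 16, ..., opening unused indexes between neighboring instructions
  // (a sketch; the scaling itself is delegated to MachineInstrIndex::scale
  // and LiveInterval::scaleNumbering below).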
  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices =
      MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.clear();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified physical register
/// is defined during the lifetime of the specified interval.
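///
/// For instance (a made-up scenario): if li covers [4, 20) and the
/// instruction at index 12 defines EAX, then
/// conflictsWithPhysRegDef(li, vrm, EAX) returns true, i.e. EAX would be an
/// unsafe physical register to assign to li.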
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
         index = index.nextIndex()) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = index.nextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it checks uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
         index = index.nextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.nextIndex();
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
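    //
    // A sketch of this first case (indices invented for illustration):
    //   4   %reg1024<def> = ...
    //   12  ...           = %reg1024<kill>
    // Here the whole lifetime is the single range
    // [defSlot(4), useSlot(12)+1) inside one block.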
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0])).nextSlot();
      else
        killIdx = defIndex.nextSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).nextSlot(), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I).nextSlot(), // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getUseIndex(getInstructionIndex(Kill)).nextSlot();
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
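    //
    // Hypothetical two-address example (vreg numbers and indices invented):
    //   8   %reg1026<def> = MOV %reg1027
    //   12  %reg1026<def> = ADD %reg1026<kill>, 4
    // The ADD both reads and writes %reg1026, so the single value defined
    // at 8 has to be split into value #1 covering [8,12) and value #0 for
    // the range that begins at 12, as handled below.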
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.prevSlot());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex,
                                    RedefIndex.nextSlot(), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
        MachineInstrIndex End =
          getUseIndex(getInstructionIndex(Killer)).nextSlot();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that
        // this value number isn't actually defined by an instruction, weird
        // huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      MachineInstrIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      MachineInstrIndex killIndex = getMBBEndIdx(mbb).nextSlot();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.nextSlot();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
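  //
  // Sketch with invented indices, assuming the usual four sub-slots per
  // instruction (LOAD=0, USE=1, DEF=2, STORE=3):
  //   4  %EAX<def> = ...
  //   8  ...       = %EAX<kill>
  // would give the interval [defSlot(4), useSlot(8)+1) = [6, 10).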
  baseIndex = baseIndex.nextIndex();
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = baseIndex.nextIndex();
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex).nextSlot();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.nextSlot();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.nextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction (or one where we otherwise know it is dead) is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.nextSlot();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a livein.
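  //
  // Sketch (invented indices): a register live-in to this block and killed
  // at index 8 gets the range [block start, useSlot(8)+1); if it is
  // redefined before any use, it is treated as dead on entry instead.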
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = baseIndex.nextIndex();
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex).nextSlot();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getDefIndex(start).nextSlot();
      SeenDefUse = true;
      break;
    }

    baseIndex = baseIndex.nextIndex();
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = baseIndex.nextIndex();
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx).nextSlot();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
/// computeIntervals - Computes the live intervals for the virtual and
/// physical registers. For some ordering of the machine instructions [1,N],
/// a live interval is an interval [i, j), where 1 <= i <= j < N, for which
/// the register is live.
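///
/// For example (invented indices): a vreg defined at instruction 4 and last
/// used at instruction 12 would get a live interval like [6, 14), i.e. from
/// its def slot to just past the use slot of its final use.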
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = MIIndex.nextIndex();

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--)
        MIIndex = MIIndex.nextIndex();

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = MIIndex.nextIndex();
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
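///
/// For example, for a value defined by
///   %reg1025<def> = INSERT_SUBREG %reg1024, %reg1026, 1
/// the source register is operand 2, i.e. %reg1026 (register numbers
/// invented for illustration).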
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//===----------------------------------------------------------------------===//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one register use other than li.reg, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For the use, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
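///
/// A sketch of such a fold (x86-flavored, details invented): the reload/use
/// pair
///   %reg1024 = MOV32rm <fi#1>
///   ...      = ADD32rr %reg1025, %reg1024
/// can become the single instruction
///   ...      = ADD32rm %reg1025, <fi#1>
/// so no separate reload of %reg1024 is needed.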
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // value.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    // Rewrite this operand to use the new vreg.
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index).nextSlot(),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        MachineInstrIndex End = getUseIndex(index).nextSlot();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(MachineInstrIndex(), 0, false,
                                   VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         MachineInstrIndex Idx) const {
  MachineInstrIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIIndex())
      continue;

    MachineInstrIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    MachineInstrIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
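
// The RewriteInfo entries collected below are sorted with RewriteInfoCompare
// (see the std::sort call in rewriteInstructionsForSpills) so that each live
// range's defs and uses are visited in instruction-index order, block by
// block.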
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  MachineInstrIndex start = getBaseIndex(I->start);
  MachineInstrIndex end = getBaseIndex(I->end.prevSlot()).nextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    MachineInstrIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
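
  // RewriteMIs is now sorted by instruction index, so all entries for the
  // same MachineInstr sit next to each other; the loop below folds them into
  // a single MIHasUse / MIHasDef summary per instruction before rewriting it.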
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    MachineInstrIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // There are use(s) after the last def; it's not safe to fold the
        // spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
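
/// alsoFoldARestore - Return true if the basic block numbered Id still has
/// a foldable restore of register vr recorded at the given index. This is
/// queried when folding a store, to decide whether the matching reload can
/// be folded away as well.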
bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = MachineInstrIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
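
/// addIntervalsForSpillsFast - A simpler spilling strategy: no splitting
/// and no per-block bookkeeping. Each instruction referencing li.reg gets
/// its own short-lived virtual register (unless the memory operand can be
/// folded directly), with a restore before uses and a spill after defs.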
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      MachineInstrIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
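
/// addIntervalsForSpills - Create new intervals for the spilled defs / uses
/// of the given interval. In outline: handle an interval produced by an
/// earlier split, work out which val#s can be rematerialized, assign a
/// stack slot if one is needed, rewrite all defs and uses, then insert the
/// spill and restore points collected along the way.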
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });
  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
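
  // ReMatOrigDefs, ReMatDefs, ReMatIds and ReMatDelete are indexed by val#
  // id: the original defining instruction (if rematerializable), the clone
  // to remat from, a per-val# remat id handed out during rewriting, and
  // whether the original def may be deleted.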
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != MachineInstrIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means
      // there are two-address instructions that re-defined the value. Only
      // the first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
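
  // Decide whether to split the live interval at basic block boundaries;
  // splitting is only attempted for an interval that spans more than one
  // block, and -split-limit can cap the total number of splits.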
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
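
  // Walk the blocks that need spills; the set bits of SpillMBBs are visited
  // with the usual BitVector idiom:
  //   for (int Id = SpillMBBs.find_first(); Id != -1;
  //        Id = SpillMBBs.find_next(Id)) { ... }
  // For each recorded spill, try to fold the store into the def first; only
  // if that fails is the spiller asked to insert an explicit spill point.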
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        MachineInstrIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index),
                             getUseIndex(index).nextSlot());
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
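
  // Now insert the restores. A restore entry is satisfied either by folding
  // the load (or the rematerializable def) into the using instruction, or
  // by asking the spiller for an explicit restore point; vrm already knows
  // whether that means a stack reload or a rematerialization.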
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      MachineInstrIndex index = restores[i].index;
      if (index == MachineInstrIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }

      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index).nextSlot());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
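
// For example (on 32-bit x86, assuming the usual register file), a query
// for AX would typically return EAX: EAX is the largest super-register of
// AX without an allocatable super-register of its own.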
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      MachineInstrIndex StartIdx = getLoadIndex(Index);
      MachineInstrIndex EndIdx = getStoreIndex(Index).nextSlot();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index),
                           getStoreIndex(Index).nextSlot());
      }
    }
  }
  return Cut;
}
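
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value and extend the register's live interval from the
/// def slot to the end of the instruction's basic block, recording a PHI
/// kill at the block's terminator gap.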
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(terminatorGaps[startInst->getParent()]);
  LiveRange LR(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    getMBBEndIdx(startInst->getParent()).nextSlot(), VN);
  Interval.addRange(LR);

  return LR;
}