1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/PseudoSourceValue.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden command-line options to help with debugging.
// NOTE(review): the embedded original line numbers in this extract skip
// (e.g. 49, 50, 51, 53, ...), so interior lines of this file are missing;
// verify any change against the complete upstream source.
// Disable rematerialization of spilled values entirely.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Use the fast-path spilling code instead of the default spiller.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Perform coalescing of trivial copies during interval computation
// (not hidden: user-visible experimental flag).
56 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
// Cap on the number of early coalescings; -1 means unlimited.
58 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
59 cl::init(-1), cl::Hidden);
// Pass statistics, printed with -stats.
61 STATISTIC(numIntervals , "Number of original intervals");
62 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
63 STATISTIC(numSplits , "Number of intervals split");
64 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Pass identification and static registration with the pass registry.
66 char LiveIntervals::ID = 0;
67 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and the ones it
// keeps up to date, so the pass manager can schedule it correctly.
// NOTE(review): the extract omits some lines here (skipping embedded
// numbering); the closing brace and possibly other AU calls are not shown.
69 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
71 AU.addRequired<AliasAnalysis>();
72 AU.addPreserved<AliasAnalysis>();
73 AU.addPreserved<LiveVariables>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreservedID(MachineLoopInfoID);
76 AU.addPreservedID(MachineDominatorsID);
79 AU.addPreservedID(PHIEliminationID);
80 AU.addRequiredID(PHIEliminationID);
83 AU.addRequiredID(TwoAddressInstructionPassID);
// Chain to the base class so common MachineFunctionPass requirements apply.
84 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the interval map, terminator
// gaps, phi-join copies, the VNInfo allocator, and any cloned instructions.
87 void LiveIntervals::releaseMemory() {
88 // Free the live intervals themselves.
89 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
90 E = r2iMap_.end(); I != E; ++I)
98 terminatorGaps.clear();
99 phiJoinCopies.clear();
101 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
102 VNInfoAllocator.Reset();
// Delete any instructions that were cloned but never inserted into a block.
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
106 mf_->DeleteMachineInstr(MI);
// CanTurnIntoImplicitDef - Return true if MI's use of Reg (operand OpIdx) is
// a plain copy-like transfer (move, SUBREG_TO_REG source, EXTRACT_SUBREG
// source), so the whole instruction may be rewritten as an IMPLICIT_DEF.
// NOTE(review): several interior lines (return statements, etc.) are missing
// from this extract.
110 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
111 unsigned OpIdx, const TargetInstrInfo *tii_){
112 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
113 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
117 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
119 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
124 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
125 /// there is one implicit_def for each use. Add isUndef marker to
126 /// implicit_def defs and their uses.
127 void LiveIntervals::processImplicitDefs() {
// Registers currently known to be defined only by implicit_def, and the
// implicit_def instructions themselves (examined again after the walk).
128 SmallSet<unsigned, 8> ImpDefRegs;
129 SmallVector<MachineInstr*, 8> ImpDefMIs;
130 MachineBasicBlock *Entry = mf_->begin();
131 SmallPtrSet<MachineBasicBlock*,16> Visited;
// Walk blocks in depth-first order from the entry block.
132 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
133 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
135 MachineBasicBlock *MBB = *DFI;
136 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
138 MachineInstr *MI = &*I;
// Record implicit_def'd registers; for physregs also record sub-registers.
140 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
141 unsigned Reg = MI->getOperand(0).getReg();
142 ImpDefRegs.insert(Reg);
143 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
144 for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
145 ImpDefRegs.insert(*SS);
147 ImpDefMIs.push_back(MI);
151 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
152 MachineOperand &MO = MI->getOperand(2);
153 if (ImpDefRegs.count(MO.getReg())) {
154 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
155 // This is an identity copy, eliminate it now.
157 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
160 MI->eraseFromParent();
// Scan the uses of this instruction for implicit_def'd registers.
165 bool ChangedToImpDef = false;
166 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
167 MachineOperand& MO = MI->getOperand(i);
168 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
170 unsigned Reg = MO.getReg();
173 if (!ImpDefRegs.count(Reg))
175 // Use is a copy, just turn it into an implicit_def.
176 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
177 bool isKill = MO.isKill();
178 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip every operand except the def (operand 0).
179 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
180 MI->RemoveOperand(j);
182 ImpDefRegs.erase(Reg);
183 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
186 ChangedToImpDef = true;
// Make sure other uses of the same register in the remaining operands
// are accounted for before dropping it from the implicit-def set.
// NOTE(review): the original comment and loop body are truncated here.
191 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
193 for (unsigned j = i+1; j != e; ++j) {
194 MachineOperand &MOJ = MI->getOperand(j);
195 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
198 ImpDefRegs.erase(Reg);
202 if (ChangedToImpDef) {
203 // Backtrack to process this new implicit_def.
// Otherwise, any register this instruction defines is no longer purely
// implicit_def'd.
206 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
207 MachineOperand& MO = MI->getOperand(i);
208 if (!MO.isReg() || !MO.isDef())
210 ImpDefRegs.erase(MO.getReg());
215 // Any outstanding liveout implicit_def's?
216 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
217 MachineInstr *MI = ImpDefMIs[i];
218 unsigned Reg = MI->getOperand(0).getReg();
219 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
220 !ImpDefRegs.count(Reg)) {
221 // Delete all "local" implicit_def's. That includes those which define
222 // physical registers since they cannot be liveout.
223 MI->eraseFromParent();
227 // If there are multiple defs of the same register and at least one
228 // is not an implicit_def, do not insert implicit_def's before the
231 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
232 DE = mri_->def_end(); DI != DE; ++DI) {
233 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
241 // The only implicit_def which we want to keep are those that are live
243 MI->eraseFromParent();
// Rewrite remaining uses of the implicit_def'd register.
245 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
246 UE = mri_->use_end(); UI != UE; ) {
247 MachineOperand &RMO = UI.getOperand();
248 MachineInstr *RMI = &*UI;
250 MachineBasicBlock *RMBB = RMI->getParent();
254 // Turn a copy use into an implicit_def.
255 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
256 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
258 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
259 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
260 RMI->RemoveOperand(j);
// Otherwise, give the use its own fresh virtual register.
264 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
265 unsigned NewVReg = mri_->createVirtualRegister(RC);
// computeNumbering - Assign a LiveIndex to every MachineInstr and basic
// block, then remap any existing live ranges / value numbers from the old
// numbering to the new one.
277 void LiveIntervals::computeNumbering() {
// Save the old index maps so existing intervals can be translated below.
278 Index2MiMap OldI2MI = i2miMap_;
279 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
285 terminatorGaps.clear();
286 phiJoinCopies.clear();
290 // Number MachineInstrs and MachineBasicBlocks.
291 // Initialize MBB indexes to a sentinel.
292 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
293 std::make_pair(LiveIndex(),LiveIndex()));
296 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
298 LiveIndex StartIdx = MIIndex;
300 // Insert an empty slot at the beginning of each block.
301 MIIndex = getNextIndex(MIIndex);
302 i2miMap_.push_back(0);
304 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
307 if (I == MBB->getFirstTerminator()) {
308 // Leave a gap before the terminators; this is where we will point
// kills that happen at the end of the block (e.g. PHI-related kills).
310 LiveIndex tGap(true, MIIndex);
312 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
314 "Multiple 'first' terminators encountered during numbering.");
315 inserted = inserted; // Avoid compiler warning if assertions turned off.
316 i2miMap_.push_back(0);
318 MIIndex = getNextIndex(MIIndex);
321 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
322 assert(inserted && "multiple MachineInstr -> index mappings");
324 i2miMap_.push_back(I);
325 MIIndex = getNextIndex(MIIndex);
328 // Insert max(1, numdefs) empty slots after every instruction.
329 unsigned Slots = I->getDesc().getNumDefs();
333 MIIndex = getNextIndex(MIIndex);
334 i2miMap_.push_back(0);
// A block with no terminator still needs a terminator gap.
339 if (MBB->getFirstTerminator() == MBB->end()) {
340 // Leave a gap before the terminators; this is where we will point
// kills that happen at the end of the block.
342 LiveIndex tGap(true, MIIndex);
344 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
346 "Multiple 'first' terminators encountered during numbering.");
347 inserted = inserted; // Avoid compiler warning if assertions turned off.
348 i2miMap_.push_back(0);
350 MIIndex = getNextIndex(MIIndex);
353 // Set the MBB2IdxMap entry for this MBB.
354 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
355 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
358 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If the function was numbered before, translate existing live ranges.
360 if (!OldI2MI.empty())
361 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
362 for (LiveInterval::iterator LI = OI->second->begin(),
363 LE = OI->second->end(); LI != LE; ++LI) {
365 // Remap the start index of the live range to the corresponding new
366 // number, or our best guess at what it _should_ correspond to if the
367 // original instruction has been erased. This is either the following
368 // instruction or its predecessor.
369 unsigned index = LI->start.getVecIndex();
370 LiveIndex::Slot offset = LI->start.getSlot();
371 if (LI->start.isLoad()) {
372 std::vector<IdxMBBPair>::const_iterator I =
373 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
374 // Take the pair containing the index
375 std::vector<IdxMBBPair>::const_iterator J =
376 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
378 LI->start = getMBBStartIdx(J->second);
380 LI->start = LiveIndex(
381 LiveIndex(mi2iMap_[OldI2MI[index]]),
382 (LiveIndex::Slot)offset);
385 // Remap the ending index in the same way that we remapped the start,
386 // except for the final step where we always map to the immediately
387 // following instruction.
388 index = (getPrevSlot(LI->end)).getVecIndex();
389 offset = LI->end.getSlot();
390 if (LI->end.isLoad()) {
391 // VReg dies at end of block.
392 std::vector<IdxMBBPair>::const_iterator I =
393 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
396 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Skip over indices whose instruction has been deleted.
398 unsigned idx = index;
399 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
401 if (index != OldI2MI.size())
403 LiveIndex(mi2iMap_[OldI2MI[index]],
404 (idx == index ? offset : LiveIndex::LOAD));
407 LiveIndex(LiveIndex::NUM * i2miMap_.size());
411 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
412 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
415 // Remap the VNInfo def index, which works the same as the
416 // start indices above. VN's with special sentinel defs
417 // don't need to be remapped.
418 if (vni->isDefAccurate() && !vni->isUnused()) {
419 unsigned index = vni->def.getVecIndex();
420 LiveIndex::Slot offset = vni->def.getSlot();
421 if (vni->def.isLoad()) {
422 std::vector<IdxMBBPair>::const_iterator I =
423 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
424 // Take the pair containing the index
425 std::vector<IdxMBBPair>::const_iterator J =
426 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
428 vni->def = getMBBStartIdx(J->second);
430 vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
434 // Remap the VNInfo kill indices, which works the same as
435 // the end indices above.
436 for (size_t i = 0; i < vni->kills.size(); ++i) {
437 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
438 LiveIndex::Slot offset = vni->kills[i].getSlot();
440 if (vni->kills[i].isLoad()) {
// FIXME(review): asserting on a string literal is always true; this was
// presumably meant to be assert(0 && "Value killed at a load slot.").
441 assert("Value killed at a load slot.");
442 /*std::vector<IdxMBBPair>::const_iterator I =
443 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
446 vni->kills[i] = getMBBEndIdx(I->second);*/
448 if (vni->kills[i].isPHIIndex()) {
// PHI kills are redirected to the terminator gap of their block.
449 std::vector<IdxMBBPair>::const_iterator I =
450 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
452 vni->kills[i] = terminatorGaps[I->second];
454 assert(OldI2MI[index] != 0 &&
455 "Kill refers to instruction not present in index maps.");
456 vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
460 unsigned idx = index;
461 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
463 if (index != OldI2MI.size())
464 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
465 (idx == index ? offset : 0);
467 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// scaleNumbering - Multiply every live index in the analysis by `factor`,
// opening up space between instructions (used when new instructions must be
// inserted without a full renumbering).
475 void LiveIntervals::scaleNumbering(int factor) {
477 // * scale MBB begin and end points
478 // * scale all ranges.
479 // * Update VNI structures.
480 // * Scale instruction numberings
482 // Scale the MBB indices.
484 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
485 MBB != MBBE; ++MBB) {
486 std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
487 mbbIndices.first = mbbIndices.first.scale(factor);
488 mbbIndices.second = mbbIndices.second.scale(factor);
489 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
// Idx2MBBMap is rebuilt above, so restore its sorted order.
491 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
493 // Scale terminator gaps.
494 for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
495 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
497 terminatorGaps[TGI->first] = TGI->second.scale(factor);
500 // Scale the intervals.
501 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
502 LI->second->scaleNumbering(factor);
505 // Scale MachineInstrs.
// Rebuild mi2iMap_ from a copy, tracking the highest slot so i2miMap_ can
// be resized to fit, then repopulate the reverse (index -> instr) map.
506 Mi2IndexMap oldmi2iMap = mi2iMap_;
507 LiveIndex highestSlot;
508 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
510 LiveIndex newSlot = MI->second.scale(factor);
511 mi2iMap_[MI->first] = newSlot;
512 highestSlot = std::max(highestSlot, newSlot);
515 unsigned highestVIndex = highestSlot.getVecIndex();
517 i2miMap_.resize(highestVIndex + 1);
518 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
520 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
526 /// runOnMachineFunction - Register allocate the whole function
// NOTE(review): the extract omits lines here (e.g. the assignment of mf_,
// the numbering/interval-computation calls, and the return); the visible
// body only caches target info, runs implicit-def processing and early
// coalescing, and updates the interval statistic.
528 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
530 mri_ = &mf_->getRegInfo();
531 tm_ = &fn.getTarget();
532 tri_ = tm_->getRegisterInfo();
533 tii_ = tm_->getInstrInfo();
534 aa_ = &getAnalysis<AliasAnalysis>();
535 lv_ = &getAnalysis<LiveVariables>();
536 allocatableRegs_ = tri_->getAllocatableSet(fn);
538 processImplicitDefs();
541 performEarlyCoalescing();
543 numIntervals += getNumIntervals();
549 /// print - Implement the dump method.
550 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
551 OS << "********** INTERVALS **********\n";
// Print every computed interval using the target register info for names.
552 for (const_iterator I = begin(), E = end(); I != E; ++I) {
553 I->second->print(OS, tri_);
// printInstrs - Dump every machine instruction with its live index,
// grouped by basic block.
560 void LiveIntervals::printInstrs(raw_ostream &OS) const {
561 OS << "********** MACHINEINSTRS **********\n";
563 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
564 mbbi != mbbe; ++mbbi) {
565 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
566 for (MachineBasicBlock::iterator mii = mbbi->begin(),
567 mie = mbbi->end(); mii != mie; ++mii) {
568 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug convenience wrapper; body omitted from this extract
// (presumably forwards to printInstrs — verify against upstream).
573 void LiveIntervals::dumpInstrs() const {
577 /// conflictsWithPhysRegDef - Returns true if the specified register
578 /// is defined during the duration of the specified interval.
579 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
580 VirtRegMap &vrm, unsigned reg) {
// Walk every index covered by the interval's ranges.
581 for (LiveInterval::Ranges::const_iterator
582 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
583 for (LiveIndex index = getBaseIndex(I->start),
584 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
585 index = getNextIndex(index)) {
586 // skip deleted instructions
587 while (index != end && !getInstructionFromIndex(index))
588 index = getNextIndex(index);
589 if (index == end) break;
591 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from the interval's own register are not conflicts.
592 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
593 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
594 if (SrcReg == li.reg || DstReg == li.reg)
596 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
597 MachineOperand& mop = MI->getOperand(i);
600 unsigned PhysReg = mop.getReg();
601 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers through the VirtRegMap to their assigned physreg.
603 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
604 if (!vrm.hasPhys(PhysReg))
606 PhysReg = vrm.getPhys(PhysReg);
608 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
617 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
618 /// it can check use as well.
619 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
620 unsigned Reg, bool CheckUse,
621 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
622 for (LiveInterval::Ranges::const_iterator
623 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
624 for (LiveIndex index = getBaseIndex(I->start),
625 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
626 index = getNextIndex(index)) {
627 // Skip deleted instructions.
628 MachineInstr *MI = 0;
629 while (index != end) {
630 MI = getInstructionFromIndex(index);
633 index = getNextIndex(index);
635 if (index == end) break;
// Copies that have already been coalesced away don't count.
637 if (JoinedCopies.count(MI))
639 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
640 MachineOperand& MO = MI->getOperand(i);
// Uses only conflict when the caller asked us to check them.
643 if (MO.isUse() && !CheckUse)
645 unsigned PhysReg = MO.getReg();
646 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
648 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Print a register to errs(): physical registers by target
// name, virtual registers as "%reg<N>".
// NOTE(review): the `else` between the two branches is missing from this
// extract (embedded numbering skips line 661).
658 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
659 if (TargetRegisterInfo::isPhysicalRegister(reg))
660 errs() << tri_->getName(reg);
662 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register at MIIdx. Handles three cases: the first def of the vreg, a
// two-address redefinition, and a PHI-elimination redefinition.
666 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
667 MachineBasicBlock::iterator mi,
671 LiveInterval &interval) {
673 errs() << "\t\tregister: ";
674 printRegName(interval.reg, tri_);
677 // Virtual registers may be defined multiple times (due to phi
678 // elimination and 2-addr elimination). Much of what we do only has to be
679 // done once for the vreg. We use an empty interval to detect the first
680 // time we see a vreg.
681 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
682 if (interval.empty()) {
683 // Get the Idx of the defining instructions.
684 LiveIndex defIndex = getDefIndex(MIIdx);
685 // Earlyclobbers move back one, so that they overlap the live range
// of inputs.
687 if (MO.isEarlyClobber())
688 defIndex = getUseIndex(MIIdx);
// Remember the defining copy (if any) for later coalescing decisions.
690 MachineInstr *CopyMI = NULL;
691 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
692 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
693 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
694 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
695 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
697 // Earlyclobbers move back one.
698 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
700 assert(ValNo->id == 0 && "First value in interval is not 0?");
702 // Loop over all of the blocks that the vreg is defined in. There are
703 // two cases we have to handle here. The most common case is a vreg
704 // whose lifetime is contained within a basic block. In this case there
705 // will be a single kill, in MBB, which comes after the definition.
706 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
707 // FIXME: what about dead vars?
709 if (vi.Kills[0] != mi)
710 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
711 else if (MO.isEarlyClobber())
712 // Earlyclobbers that die in this instruction move up one extra, to
713 // compensate for having the starting point moved back one. This
714 // gets them to overlap the live range of other outputs.
715 killIdx = getNextSlot(getNextSlot(defIndex));
717 killIdx = getNextSlot(defIndex);
719 // If the kill happens after the definition, we have an intra-block
// live range.
721 if (killIdx > defIndex) {
722 assert(vi.AliveBlocks.empty() &&
723 "Shouldn't be alive across any blocks!");
724 LiveRange LR(defIndex, killIdx, ValNo);
725 interval.addRange(LR);
726 DEBUG(errs() << " +" << LR << "\n");
727 ValNo->addKill(killIdx);
732 // The other case we handle is when a virtual register lives to the end
733 // of the defining block, potentially live across some blocks, then is
734 // live into some number of blocks, but gets killed. Start by adding a
735 // range that goes from this definition to the end of the defining block.
736 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
737 DEBUG(errs() << " +" << NewLR);
738 interval.addRange(NewLR);
740 // Iterate over all of the blocks that the variable is completely
741 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
743 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
744 E = vi.AliveBlocks.end(); I != E; ++I) {
745 LiveRange LR(getMBBStartIdx(*I),
746 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
748 interval.addRange(LR);
749 DEBUG(errs() << " +" << LR);
752 // Finally, this virtual register is live from the start of any killing
753 // block to the 'use' slot of the killing instruction.
754 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
755 MachineInstr *Kill = vi.Kills[i];
757 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
758 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
759 interval.addRange(LR);
760 ValNo->addKill(killIdx);
761 DEBUG(errs() << " +" << LR);
765 // If this is the second time we see a virtual register definition, it
766 // must be due to phi elimination or two addr elimination. If this is
767 // the result of two address elimination, then the vreg is one of the
768 // def-and-use register operand.
769 if (mi->isRegTiedToUseOperand(MOIdx)) {
770 // If this is a two-address definition, then we have already processed
771 // the live range. The only problem is that we didn't realize there
772 // are actually two values in the live interval. Because of this we
773 // need to take the LiveRegion that defines this register and split it
// into two values.
775 assert(interval.containsOneValue());
776 LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
777 LiveIndex RedefIndex = getDefIndex(MIIdx);
778 if (MO.isEarlyClobber())
779 RedefIndex = getUseIndex(MIIdx);
781 const LiveRange *OldLR =
782 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
783 VNInfo *OldValNo = OldLR->valno;
785 // Delete the initial value, which should be short and continuous,
786 // because the 2-addr copy must be in the same MBB as the redef.
787 interval.removeRange(DefIndex, RedefIndex);
789 // Two-address vregs should always only be redefined once. This means
790 // that at this point, there should be exactly one value number in it.
791 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
793 // The new value number (#1) is defined by the instruction we claimed
// defined value #0.
795 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
796 false, // update at *
798 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
800 // Value#0 is now defined by the 2-addr instruction.
801 OldValNo->def = RedefIndex;
802 OldValNo->setCopy(0);
803 if (MO.isEarlyClobber())
804 OldValNo->setHasRedefByEC(true);
806 // Add the new live interval which replaces the range for the input copy.
807 LiveRange LR(DefIndex, RedefIndex, ValNo);
808 DEBUG(errs() << " replace range with " << LR);
809 interval.addRange(LR);
810 ValNo->addKill(RedefIndex);
812 // If this redefinition is dead, we need to add a dummy unit live
813 // range covering the def slot.
816 LiveRange(RedefIndex, MO.isEarlyClobber() ?
817 getNextSlot(getNextSlot(RedefIndex)) :
818 getNextSlot(RedefIndex), OldValNo));
821 errs() << " RESULT: ";
822 interval.print(errs(), tri_);
825 // Otherwise, this must be because of phi elimination. If this is the
826 // first redefinition of the vreg that we have seen, go back and change
827 // the live range in the PHI block to be a different value number.
828 if (interval.containsOneValue()) {
829 // Remove the old range that we now know has an incorrect number.
830 VNInfo *VNI = interval.getValNumInfo(0);
831 MachineInstr *Killer = vi.Kills[0];
832 phiJoinCopies.push_back(Killer);
833 LiveIndex Start = getMBBStartIdx(Killer->getParent());
835 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
837 errs() << " Removing [" << Start << "," << End << "] from: ";
838 interval.print(errs(), tri_);
841 interval.removeRange(Start, End);
842 assert(interval.ranges.size() == 1 &&
843 "Newly discovered PHI interval has >1 ranges.");
844 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
845 VNI->addKill(terminatorGaps[killMBB]);
846 VNI->setHasPHIKill(true);
848 errs() << " RESULT: ";
849 interval.print(errs(), tri_);
852 // Replace the interval with one of a NEW value number. Note that this
853 // value number isn't actually defined by an instruction, weird huh? :)
854 LiveRange LR(Start, End,
855 interval.getNextValue(LiveIndex(mbb->getNumber()),
856 0, false, VNInfoAllocator));
857 LR.valno->setIsPHIDef(true);
858 DEBUG(errs() << " replace range with " << LR);
859 interval.addRange(LR);
860 LR.valno->addKill(End);
862 errs() << " RESULT: ";
863 interval.print(errs(), tri_);
867 // In the case of PHI elimination, each variable definition is only
868 // live until the end of the block. We've already taken care of the
869 // rest of the live range.
870 LiveIndex defIndex = getDefIndex(MIIdx);
871 if (MO.isEarlyClobber())
872 defIndex = getUseIndex(MIIdx);
875 MachineInstr *CopyMI = NULL;
876 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
877 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
878 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
879 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
880 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
882 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
884 LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
885 LiveRange LR(defIndex, killIndex, ValNo);
886 interval.addRange(LR);
887 ValNo->addKill(terminatorGaps[mbb]);
888 ValNo->setHasPHIKill(true);
889 DEBUG(errs() << " +" << LR);
893 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Compute the live range created by a def of a
// physical register at MIIdx; the range always ends within the defining
// block (at a kill, a redef, or immediately if dead).
896 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
897 MachineBasicBlock::iterator mi,
900 LiveInterval &interval,
901 MachineInstr *CopyMI) {
902 // A physical register cannot be live across basic blocks, so its
903 // lifetime must end somewhere in its defining basic block.
905 errs() << "\t\tregister: ";
906 printRegName(interval.reg, tri_);
909 LiveIndex baseIndex = MIIdx;
910 LiveIndex start = getDefIndex(baseIndex);
911 // Earlyclobbers move back one.
912 if (MO.isEarlyClobber())
913 start = getUseIndex(MIIdx);
914 LiveIndex end = start;
916 // If it is not used after definition, it is considered dead at
917 // the instruction defining it. Hence its interval is:
918 // [defSlot(def), defSlot(def)+1)
919 // For earlyclobbers, the defSlot was pushed back one; the extra
920 // advance below compensates.
922 DEBUG(errs() << " dead");
923 if (MO.isEarlyClobber())
924 end = getNextSlot(getNextSlot(start));
926 end = getNextSlot(start);
930 // If it is not dead on definition, it must be killed by a
931 // subsequent instruction. Hence its interval is:
932 // [defSlot(def), useSlot(kill)+1)
933 baseIndex = getNextIndex(baseIndex);
934 while (++mi != MBB->end()) {
// Keep baseIndex in step with mi by skipping empty/deleted index slots.
935 while (baseIndex.getVecIndex() < i2miMap_.size() &&
936 getInstructionFromIndex(baseIndex) == 0)
937 baseIndex = getNextIndex(baseIndex);
938 if (mi->killsRegister(interval.reg, tri_)) {
939 DEBUG(errs() << " killed");
940 end = getNextSlot(getUseIndex(baseIndex));
943 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
945 if (mi->isRegTiedToUseOperand(DefIdx)) {
946 // Two-address instruction.
947 end = getDefIndex(baseIndex);
948 if (mi->getOperand(DefIdx).isEarlyClobber())
949 end = getUseIndex(baseIndex);
951 // Another instruction redefines the register before it is ever read.
952 // Then the register is essentially dead at the instruction that defines
953 // it. Hence its interval is:
954 // [defSlot(def), defSlot(def)+1)
955 DEBUG(errs() << " dead");
956 end = getNextSlot(start);
962 baseIndex = getNextIndex(baseIndex);
965 // The only case we should have a dead physreg here without a killing or
966 // instruction where we know it's dead is if it is live-in to the function
967 // and never used. Another possible case is the implicit use of the
968 // physical register has been deleted by two-address pass.
969 end = getNextSlot(start);
972 assert(start < end && "did not find end of interval?");
974 // Already exists? Extend old live interval.
975 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
976 bool Extend = OldLR != interval.end();
977 VNInfo *ValNo = Extend
978 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
979 if (MO.isEarlyClobber() && Extend)
980 ValNo->setHasRedefByEC(true);
981 LiveRange LR(start, end, ValNo);
982 interval.addRange(LR);
983 LR.valno->addKill(end);
984 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler; for physregs also process sub-registers.
987 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
988 MachineBasicBlock::iterator MI,
992 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
993 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
994 getOrCreateInterval(MO.getReg()));
995 else if (allocatableRegs_[MO.getReg()]) {
// Record the copy instruction (if this def comes from one) so the physreg
// handler can attach it to the value number.
996 MachineInstr *CopyMI = NULL;
997 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
998 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
999 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1000 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
1001 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1003 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1004 getOrCreateInterval(MO.getReg()), CopyMI);
1005 // Def of a register also defines its sub-registers.
1006 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1007 // If MI also modifies the sub-register explicitly, avoid processing it
1008 // more than once. Do not pass in TRI here so it checks for exact match.
1009 if (!MI->modifiesRegister(*AS))
1010 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1011 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is live-in
// to MBB, extending from the block start to its first kill/redef (or through
// the whole block if neither is seen). The value number is marked PHI-def
// since it has no defining instruction here.
1015 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1017 LiveInterval &interval, bool isAlias) {
1019 errs() << "\t\tlivein register: ";
1020 printRegName(interval.reg, tri_);
1023 // Look for kills, if it reaches a def before it's killed, then it shouldn't
1024 // be considered a livein.
1025 MachineBasicBlock::iterator mi = MBB->begin();
1026 LiveIndex baseIndex = MIIdx;
1027 LiveIndex start = baseIndex;
// Skip empty/deleted index slots at the top of the block.
1028 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1029 getInstructionFromIndex(baseIndex) == 0)
1030 baseIndex = getNextIndex(baseIndex);
1031 LiveIndex end = baseIndex;
1032 bool SeenDefUse = false;
1034 while (mi != MBB->end()) {
1035 if (mi->killsRegister(interval.reg, tri_)) {
1036 DEBUG(errs() << " killed");
1037 end = getNextSlot(getUseIndex(baseIndex));
1040 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1041 // Another instruction redefines the register before it is ever read.
1042 // Then the register is essentially dead at the instruction that defines
1043 // it. Hence its interval is:
1044 // [defSlot(def), defSlot(def)+1)
1045 DEBUG(errs() << " dead");
1046 end = getNextSlot(getDefIndex(start));
1051 baseIndex = getNextIndex(baseIndex);
1053 if (mi != MBB->end()) {
1054 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1055 getInstructionFromIndex(baseIndex) == 0)
1056 baseIndex = getNextIndex(baseIndex);
1060 // Live-in register might not be used at all.
1063 DEBUG(errs() << " dead");
1064 end = getNextSlot(getDefIndex(MIIdx));
1066 DEBUG(errs() << " live through");
// No defining instruction: synthesize a PHI-def value number at block entry.
1072 interval.getNextValue(LiveIndex(MBB->getNumber()),
1073 0, false, VNInfoAllocator);
1074 vni->setIsPHIDef(true);
1075 LiveRange LR(start, end, vni);
1077 interval.addRange(LR);
1078 LR.valno->addKill(end);
1079 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Classify every def of SrcInt.reg: copies whose
// source is DstInt.reg are identity copies; any other copy goes to
// OtherCopies (and conflicts if DstInt is live at that instruction).
// Returns true when the identity copies outnumber the others.
// NOTE(review): this excerpt elides the return-type line, the 'continue'
// after the isMoveInstr test, and the HaveConflict early-return — confirm
// against the full source.
1083 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1084 SmallVector<MachineInstr*,16> &IdentCopies,
1085 SmallVector<MachineInstr*,16> &OtherCopies) {
1086 bool HaveConflict = false;
1087 unsigned NumIdent = 0;
1088 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
1089 re = mri_->def_end(); ri != re; ++ri) {
1090 MachineInstr *MI = &*ri;
1091 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1092 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1094 if (SrcReg != DstInt.reg) {
1095 OtherCopies.push_back(MI);
1096 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1098 IdentCopies.push_back(MI);
1104 return false; // Let coalescer handle it
1105 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - For each recorded phi-join copy, coalesce the
// phi source register into the phi destination when profitable: redirect
// non-identity copies to define the destination directly, delete the
// would-be identity copies, then erase the join itself and transfer the
// source's remaining range to the destination's value number.
// NOTE(review): this excerpt elides a number of original lines (early
// returns, 'continue's, allocator argument of getNextValue, loop-body
// tails) — confirm against the full source.
1108 void LiveIntervals::performEarlyCoalescing() {
1109 if (!EarlyCoalescing)
1112 /// Perform early coalescing: eliminate copies which feed into phi joins
1113 /// and whose sources are defined by the phi joins.
1114 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1115 MachineInstr *Join = phiJoinCopies[i];
// Respect the -early-coalescing-limit style cap on how many joins we do.
1116 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1119 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1120 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1122 assert(isMove && "PHI join instruction must be a move!");
1127 LiveInterval &DstInt = getInterval(PHIDst);
1128 LiveInterval &SrcInt = getInterval(PHISrc);
1129 SmallVector<MachineInstr*, 16> IdentCopies;
1130 SmallVector<MachineInstr*, 16> OtherCopies;
1131 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1134 DEBUG(errs() << "PHI Join: " << *Join);
1135 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1136 VNInfo *VNI = DstInt.getValNumInfo(0);
1138 // Change the non-identity copies to directly target the phi destination.
1139 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1140 MachineInstr *PHICopy = OtherCopies[i];
1141 DEBUG(errs() << "Moving: " << *PHICopy);
1143 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1144 LiveIndex DefIndex = getDefIndex(MIIndex);
1145 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1146 LiveIndex StartIndex = SLR->start;
1147 LiveIndex EndIndex = SLR->end;
1149 // Delete val# defined by the now identity copy and add the range from
1150 // beginning of the mbb to the end of the range.
1151 SrcInt.removeValNo(SLR->valno);
1152 DEBUG(errs() << "  added range [" << StartIndex << ','
1153 << EndIndex << "] to reg" << DstInt.reg << '\n');
// Remove any overlapping destination range before adding the new one.
1154 if (DstInt.liveAt(StartIndex))
1155 DstInt.removeRange(StartIndex, EndIndex);
1156 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1158 NewVNI->setHasPHIKill(true);
1159 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's operands from the phi source to the destination.
1160 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1161 MachineOperand &MO = PHICopy->getOperand(j);
1162 if (!MO.isReg() || MO.getReg() != PHISrc)
1168 // Now let's eliminate all the would-be identity copies.
1169 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1170 MachineInstr *PHICopy = IdentCopies[i];
1171 DEBUG(errs() << "Coalescing: " << *PHICopy);
1173 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1174 LiveIndex DefIndex = getDefIndex(MIIndex);
1175 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1176 LiveIndex StartIndex = SLR->start;
1177 LiveIndex EndIndex = SLR->end;
1179 // Delete val# defined by the now identity copy and add the range from
1180 // beginning of the mbb to the end of the range.
1181 SrcInt.removeValNo(SLR->valno);
1182 RemoveMachineInstrFromMaps(PHICopy);
1183 PHICopy->eraseFromParent();
1184 DEBUG(errs() << "  added range [" << StartIndex << ','
1185 << EndIndex << "] to reg" << DstInt.reg << '\n');
1186 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1189 // Remove the phi join and update the phi block liveness.
1190 LiveIndex MIIndex = getInstructionIndex(Join);
1191 LiveIndex UseIndex = getUseIndex(MIIndex);
1192 LiveIndex DefIndex = getDefIndex(MIIndex);
1193 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1194 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The destination value is no longer defined by a copy, and its def
// location is no longer accurate once the join is erased.
1195 DLR->valno->setCopy(0);
1196 DLR->valno->setIsDefAccurate(false);
1197 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1198 SrcInt.removeRange(SLR->start, SLR->end);
1199 assert(SrcInt.empty());
1200 removeInterval(PHISrc);
1201 RemoveMachineInstrFromMaps(Join);
1202 Join->eraseFromParent();
1208 /// computeIntervals - computes the live intervals for virtual
1209 /// registers. for some ordering of the machine instructions [1,N] a
1210 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1211 /// which a variable is live
// NOTE(review): this excerpt elides several original lines (the
// MO.isReg()/isDef() guard before handleRegisterDef, the slot-skipping
// arithmetic after getNumDefs, and some loop closings) — confirm against
// the full source.
1212 void LiveIntervals::computeIntervals() {
1213 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1214 << "********** Function: "
1215 << ((Value*)mf_->getFunction())->getName() << '\n');
1217 SmallVector<unsigned, 8> UndefUses;
1218 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1219 MBBI != E; ++MBBI) {
1220 MachineBasicBlock *MBB = MBBI;
1221 // Track the index of the current machine instr.
1222 LiveIndex MIIndex = getMBBStartIdx(MBB);
1223 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1225 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1227 // Create intervals for live-ins to this BB first.
1228 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1229 LE = MBB->livein_end(); LI != LE; ++LI) {
1230 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1231 // Multiple live-ins can alias the same register.
1232 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1233 if (!hasInterval(*AS))
1234 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1238 // Skip over empty initial indices.
1239 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1240 getInstructionFromIndex(MIIndex) == 0)
1241 MIIndex = getNextIndex(MIIndex);
1243 for (; MI != miEnd; ++MI) {
1244 DEBUG(errs() << MIIndex << "\t" << *MI);
// Walk operands in reverse so defs are seen after uses of the same reg.
1247 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1248 MachineOperand &MO = MI->getOperand(i);
1249 if (!MO.isReg() || !MO.getReg())
1252 // handle register defs - build intervals
1254 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1255 else if (MO.isUndef())
1256 UndefUses.push_back(MO.getReg());
1259 // Skip over the empty slots after each instruction.
1260 unsigned Slots = MI->getDesc().getNumDefs();
1265 MIIndex = getNextIndex(MIIndex);
1267 // Skip over empty indices.
1268 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1269 getInstructionFromIndex(MIIndex) == 0)
1270 MIIndex = getNextIndex(MIIndex);
1274 // Create empty intervals for registers defined by implicit_def's (except
1275 // for those implicit_def that define values which are liveout of their
1277 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1278 unsigned UndefReg = UndefUses[i];
1279 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Collect into MBBs the basic blocks whose start index
// lies in [Start, End), using binary search over the sorted Idx2MBBMap.
// NOTE(review): the loop's break/advance statements and the final return
// of ResVal are elided in this excerpt.
1283 bool LiveIntervals::findLiveInMBBs(
1284 LiveIndex Start, LiveIndex End,
1285 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1286 std::vector<IdxMBBPair>::const_iterator I =
1287 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1289 bool ResVal = false;
1290 while (I != Idx2MBBMap.end()) {
1291 if (I->first >= End)
1293 MBBs.push_back(I->second);
// findReachableMBBs - Like findLiveInMBBs, but collects the successors of
// each block whose range is contained in [Start, End), i.e. blocks
// reachable from the covered region.
// NOTE(review): the loop's break/advance statements and the final return
// of ResVal are elided in this excerpt.
1300 bool LiveIntervals::findReachableMBBs(
1301 LiveIndex Start, LiveIndex End,
1302 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1303 std::vector<IdxMBBPair>::const_iterator I =
1304 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1306 bool ResVal = false;
1307 while (I != Idx2MBBMap.end()) {
1310 MachineBasicBlock *MBB = I->second;
1311 if (getMBBEndIdx(MBB) > End)
1313 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1314 SE = MBB->succ_end(); SI != SE; ++SI)
1315 MBBs.push_back(*SI);
// createInterval - Allocate a fresh LiveInterval for reg. Physical
// registers get infinite spill weight so they are never chosen for
// spilling; virtual registers start at weight 0.
1322 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1323 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1324 return new LiveInterval(reg, Weight);
1327 /// dupInterval - Duplicate a live interval. The caller is responsible for
1328 /// managing the allocated memory.
1329 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1330 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copies ranges and value numbers using this pass's VNInfo allocator.
1331 NewLI->Copy(*li, mri_, getVNInfoAllocator());
// NOTE(review): the 'return NewLI;' line is elided in this excerpt.
1335 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1336 /// copy field and returns the source register that defines it.
// NOTE(review): the 'return 0' when there is no copy, the returns of the
// computed Reg/SrcReg, and the closing lines are elided in this excerpt.
1337 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1338 if (!VNI->getCopy())
1341 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1342 // If it's extracting out of a physical register, return the sub-register.
1343 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1344 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1345 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1347 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1348 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// For insert/subreg_to_reg the inserted value is operand 2.
1349 return VNI->getCopy()->getOperand(2).getReg();
1351 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1352 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1354 llvm_unreachable("Unrecognized copy instruction!");
1358 //===----------------------------------------------------------------------===//
1359 // Register allocator hooks.
1362 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1363 /// allow one) virtual register operand, then its uses are implicitly using
1364 /// the register. Returns the virtual register.
// NOTE(review): the RegOp declaration/initialization, several 'continue's,
// the assert condition, and the final 'return RegOp;' are elided in this
// excerpt — confirm against the full source.
1365 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1366 MachineInstr *MI) const {
1368 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1369 MachineOperand &MO = MI->getOperand(i);
1370 if (!MO.isReg() || !MO.isUse())
1372 unsigned Reg = MO.getReg();
// Skip the interval's own register and null registers.
1373 if (Reg == 0 || Reg == li.reg)
// Non-allocatable physical register uses (e.g. reserved regs) are OK.
1376 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1377 !allocatableRegs_[Reg])
1379 // FIXME: For now, only remat MI with at most one register operand.
1381 "Can't rematerialize instruction with multiple register operand!");
1382 RegOp = MO.getReg();
1390 /// isValNoAvailableAt - Return true if the val# of the specified interval
1391 /// which reaches the given instruction also reaches the specified use index.
1392 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1393 LiveIndex UseIdx) const {
1394 LiveIndex Index = getInstructionIndex(MI);
// Assumes li is live at MI — FindLiveRangeContaining must not be end().
1395 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1396 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1397 return UI != li.end() && UI->valno == ValNo;
1400 /// isReMaterializable - Returns true if the definition MI of the specified
1401 /// val# of the specified interval is re-materializable.
// NOTE(review): this excerpt elides the isLoad out-parameter in the
// signature, the early returns, and the trailing 'return true;' — confirm
// against the full source.
1402 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1403 const VNInfo *ValNo, MachineInstr *MI,
1404 SmallVectorImpl<LiveInterval*> &SpillIs,
// Target hook decides whether MI is trivially rematerializable at all.
1409 if (!tii_->isTriviallyReMaterializable(MI, aa_))
1412 // Target-specific code can mark an instruction as being rematerializable
1413 // if it has one virtual reg use, though it had better be something like
1414 // a PIC base register which is likely to be live everywhere.
1415 unsigned ImpUse = getReMatImplicitUse(li, MI);
1417 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every use of li reached by ValNo must also see the implicit-use value.
1418 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1419 re = mri_->use_end(); ri != re; ++ri) {
1420 MachineInstr *UseMI = &*ri;
1421 LiveIndex UseIdx = getInstructionIndex(UseMI);
1422 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1424 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1428 // If a register operand of the re-materialized instruction is going to
1429 // be spilled next, then it's not legal to re-materialize this instruction.
1430 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1431 if (ImpUse == SpillIs[i]->reg)
1437 /// isReMaterializable - Returns true if the definition MI of the specified
1438 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards with empty SpillIs and a dummy isLoad
// flag (its declaration is elided in this excerpt).
1439 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1440 const VNInfo *ValNo, MachineInstr *MI) {
1441 SmallVector<LiveInterval*, 4> Dummy1;
1443 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1446 /// isReMaterializable - Returns true if every definition of MI of every
1447 /// val# of the specified interval is re-materializable.
// NOTE(review): the isLoad out-parameter, the early 'return false' lines,
// and the trailing 'return true;' are elided in this excerpt.
1448 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1449 SmallVectorImpl<LiveInterval*> &SpillIs,
1452 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1454 const VNInfo *VNI = *i;
1455 if (VNI->isUnused())
1456 continue; // Dead val#.
1457 // Is the def for the val# rematerializable?
// Without an accurate def location we cannot find the defining MI.
1458 if (!VNI->isDefAccurate())
1460 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1461 bool DefIsLoad = false;
1463 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any rematerializable def is a load.
1465 isLoad |= DefIsLoad;
1470 /// FilterFoldedOps - Filter out two-address use operands. Return
1471 /// true if it finds any issue with the operands that ought to prevent
// folding. MRInfo accumulates VirtRegMap mod/ref flags for the operands
// that remain in FoldOps.
// NOTE(review): the MRInfo reference parameter line, the subreg guard
// after the FIXME, and the 'return true'/'return false' lines are elided
// in this excerpt.
1473 static bool FilterFoldedOps(MachineInstr *MI,
1474 SmallVector<unsigned, 2> &Ops,
1476 SmallVector<unsigned, 2> &FoldOps) {
1478 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1479 unsigned OpIdx = Ops[i];
1480 MachineOperand &MO = MI->getOperand(OpIdx);
1481 // FIXME: fold subreg use.
1485 MRInfo |= (unsigned)VirtRegMap::isMod;
1487 // Filter out two-address use operand(s).
1488 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use implies both a read and a write of the register.
1489 MRInfo = VirtRegMap::isModRef;
1492 MRInfo |= (unsigned)VirtRegMap::isRef;
1494 FoldOps.push_back(OpIdx);
1500 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1501 /// slot / to reg or any rematerialized load into ith operand of specified
1502 /// MI. If it is successful, MI is updated with the newly created MI and
// returns true.
// NOTE(review): this excerpt elides the InstrIdx parameter, several
// early 'return' lines (including the null-check of fmi), and the final
// 'return true;' — confirm against the full source.
1504 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1505 VirtRegMap &vrm, MachineInstr *DefMI,
1507 SmallVector<unsigned, 2> &Ops,
1508 bool isSS, int Slot, unsigned Reg) {
1509 // If it is an implicit def instruction, just delete it.
1510 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1511 RemoveMachineInstrFromMaps(MI);
1512 vrm.RemoveMachineInstrFromMaps(MI);
1513 MI->eraseFromParent();
1518 // Filter the list of operand indexes that are to be folded. Abort if
1519 // any operand will prevent folding.
1520 unsigned MRInfo = 0;
1521 SmallVector<unsigned, 2> FoldOps;
1522 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1525 // The only time it's safe to fold into a two address instruction is when
1526 // it's folding reload and spill from / into a spill stack slot.
1527 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either from a stack slot (isSS) or from the remat def instruction.
1530 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1531 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1533 // Remember this instruction uses the spill slot.
1534 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1536 // Attempt to fold the memory reference into the instruction. If
1537 // we can do this, we don't need to insert spill code.
1538 MachineBasicBlock &MBB = *MI->getParent();
1539 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1540 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Keep the VirtRegMap's per-instruction bookkeeping in sync with fmi.
1541 vrm.transferSpillPts(MI, fmi);
1542 vrm.transferRestorePts(MI, fmi);
1543 vrm.transferEmergencySpills(MI, fmi);
// Replace MI with fmi in the index maps and in the basic block.
1545 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1546 mi2iMap_[fmi] = InstrIdx;
1547 MI = MBB.insert(MBB.erase(MI), fmi);
1554 /// canFoldMemoryOperand - Returns true if the specified load / store
1555 /// folding is possible.
// NOTE(review): the ReMat parameter line and the early 'return false'
// lines are elided in this excerpt.
1556 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1557 SmallVector<unsigned, 2> &Ops,
1559 // Filter the list of operand indexes that are to be folded. Abort if
1560 // any operand will prevent folding.
1561 unsigned MRInfo = 0;
1562 SmallVector<unsigned, 2> FoldOps;
1563 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1566 // It's only legal to remat for a use, not a def.
1567 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
1570 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true when every range of li lies inside a
// single basic block (checked by mapping each range start to its block
// via binary search on Idx2MBBMap).
// NOTE(review): the early returns and the final 'return true;' are elided
// in this excerpt.
1573 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1574 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1575 for (LiveInterval::Ranges::const_iterator
1576 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1577 std::vector<IdxMBBPair>::const_iterator II =
1578 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1579 if (II == Idx2MBBMap.end())
1581 if (I->end > II->first)  // crossing a MBB.
1583 MBBs.insert(II->second);
// More than one distinct block seen => not contained in one MBB.
1584 if (MBBs.size() > 1)
1590 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1591 /// interval on to-be re-materialized operands of MI) with new register.
// NOTE(review): the vrm parameter line, the operand guard after line
// 1599, and several 'continue's are elided in this excerpt.
1592 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1593 MachineInstr *MI, unsigned NewVReg,
1595 // There is an implicit use. That means one of the other operand is
1596 // being remat'ed and the remat'ed instruction has li.reg as an
1597 // use operand. Make sure we rewrite that as well.
1598 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1599 MachineOperand &MO = MI->getOperand(i);
1602 unsigned Reg = MO.getReg();
1603 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1605 if (!vrm.isReMaterialized(Reg))
// Rewrite li.reg inside the rematerialized def of this operand.
1607 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1608 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1610 UseMO->setReg(NewVReg);
1614 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1615 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands that reference li.reg onto a fresh
// (or reused) NewVReg, attempting remat deletion and memory-operand
// folding along the way, and creates/extends the spill interval for
// NewVReg. Returns whether all rewritten operands could be folded.
// NOTE(review): this excerpt elides many original lines (the MI
// parameter, RestartInstruction label, several guards/'continue's/
// 'return's, allocator arguments, and range-add calls) — confirm against
// the full source before relying on control flow described here.
1616 bool LiveIntervals::
1617 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1618 bool TrySplit, LiveIndex index, LiveIndex end,
1620 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1621 unsigned Slot, int LdSlot,
1622 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1624 const TargetRegisterClass* rc,
1625 SmallVector<int, 4> &ReMatIds,
1626 const MachineLoopInfo *loopInfo,
1627 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1628 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1629 std::vector<LiveInterval*> &NewLIs) {
1630 bool CanFold = false;
1632 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1633 MachineOperand& mop = MI->getOperand(i);
1636 unsigned Reg = mop.getReg();
1637 unsigned RegI = Reg;
1638 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1643 bool TryFold = !DefIsReMat;
1644 bool FoldSS = true; // Default behavior unless it's a remat.
1645 int FoldSlot = Slot;
1647 // If this is the rematerializable definition MI itself and
1648 // all of its uses are rematerialized, simply delete it.
1649 if (MI == ReMatOrigDefMI && CanDelete) {
1650 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1652 RemoveMachineInstrFromMaps(MI);
1653 vrm.RemoveMachineInstrFromMaps(MI);
1654 MI->eraseFromParent();
1658 // If def for this use can't be rematerialized, then try folding.
1659 // If def is rematerializable and it's a load, also try folding.
1660 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1662 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1668 // Scan all of the operands of this instruction rewriting operands
1669 // to use NewVReg instead of li.reg as appropriate. We do this for
1672 // 1. If the instr reads the same spilled vreg multiple times, we
1673 // want to reuse the NewVReg.
1674 // 2. If the instr is a two-addr instruction, we are required to
1675 // keep the src/dst regs pinned.
1677 // Keep track of whether we replace a use and/or def so that we can
1678 // create the spill interval with the appropriate range.
1680 HasUse = mop.isUse();
1681 HasDef = mop.isDef();
1682 SmallVector<unsigned, 2> Ops;
// Collect the other operands of MI that also reference li.reg.
1684 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1685 const MachineOperand &MOj = MI->getOperand(j);
1688 unsigned RegJ = MOj.getReg();
1689 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1693 if (!MOj.isUndef()) {
1694 HasUse |= MOj.isUse();
1695 HasDef |= MOj.isDef();
1700 // Create a new virtual register for the spill interval.
1701 // Create the new register now so we can map the fold instruction
1702 // to the new register so when it is unfolded we get the correct
1704 bool CreatedNewVReg = false;
1706 NewVReg = mri_->createVirtualRegister(rc);
1708 CreatedNewVReg = true;
1714 // Do not fold load / store here if we are splitting. We'll find an
1715 // optimal point to insert a load / store later.
1717 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1718 Ops, FoldSS, FoldSlot, NewVReg)) {
1719 // Folding the load/store can completely change the instruction in
1720 // unpredictable ways, rescan it from the beginning.
1723 // We need to give the new vreg the same stack slot as the
1724 // spilled interval.
1725 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1731 if (isNotInMIMap(MI))
1733 goto RestartInstruction;
1736 // We'll try to fold it later if it's profitable.
1737 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, and implicit remat uses, to the new vreg.
1741 mop.setReg(NewVReg);
1742 if (mop.isImplicit())
1743 rewriteImplicitOps(li, MI, NewVReg, vrm);
1745 // Reuse NewVReg for other reads.
1746 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1747 MachineOperand &mopj = MI->getOperand(Ops[j]);
1748 mopj.setReg(NewVReg);
1749 if (mopj.isImplicit())
1750 rewriteImplicitOps(li, MI, NewVReg, vrm);
1753 if (CreatedNewVReg) {
1755 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1756 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1757 // Each valnum may have its own remat id.
1758 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1760 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1762 if (!CanDelete || (HasUse && HasDef)) {
1763 // If this is a two-addr instruction then its use operands are
1764 // rematerializable but its def is not. It should be assigned a
1766 vrm.assignVirt2StackSlot(NewVReg, Slot);
1769 vrm.assignVirt2StackSlot(NewVReg, Slot);
1771 } else if (HasUse && HasDef &&
1772 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1773 // If this interval hasn't been assigned a stack slot (because earlier
1774 // def is a deleted remat def), do it now.
1775 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1776 vrm.assignVirt2StackSlot(NewVReg, Slot);
1779 // Re-matting an instruction with virtual register use. Add the
1780 // register as an implicit use on the use MI.
1781 if (DefIsReMat && ImpUse)
1782 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1784 // Create a new register interval for this spill / remat.
1785 LiveInterval &nI = getOrCreateInterval(NewVReg);
1786 if (CreatedNewVReg) {
1787 NewLIs.push_back(&nI);
1788 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1790 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the spill interval a range covering this def and/or use.
1794 if (CreatedNewVReg) {
1795 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1796 nI.getNextValue(LiveIndex(), 0, false,
1798 DEBUG(errs() << " +" << LR);
1801 // Extend the split live interval to this def / use.
1802 LiveIndex End = getNextSlot(getUseIndex(index));
1803 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1804 nI.getValNumInfo(nI.getNumValNums()-1));
1805 DEBUG(errs() << " +" << LR);
1810 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1811 nI.getNextValue(LiveIndex(), 0, false,
1813 DEBUG(errs() << " +" << LR);
1818 errs() << "\t\t\t\tAdded new interval: ";
1819 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if any kill of VNI lies strictly
// inside MBB after Idx (PHI kill indices are skipped).
// NOTE(review): the VNI parameter line, the 'continue' for PHI indices,
// the 'return true;' inside the loop, and the final 'return false;' are
// elided in this excerpt.
1825 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1827 MachineBasicBlock *MBB,
1828 LiveIndex Idx) const {
1829 LiveIndex End = getMBBEndIdx(MBB);
1830 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1831 if (VNI->kills[j].isPHIIndex())
1834 LiveIndex KillIdx = VNI->kills[j];
1835 if (KillIdx > Idx && KillIdx < End)
1841 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1842 /// during spilling.
// Plain record: instruction index, the instruction, and whether the
// tracked operand is a use and/or a def. Field declarations (Index, MI,
// HasUse, HasDef) are elided in this excerpt.
1844 struct RewriteInfo {
1849 RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
1850 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index, for sorting RewriteInfo
// records before the rewrite pass.
1853 struct RewriteInfoCompare {
1854 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1855 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Drive rewriteInstructionForSpills over
// every def/use of li.reg inside the live range I: collect and sort the
// referencing instructions, rewrite each one onto spill vregs, and record
// per-MBB spill (SpillIdxes/SpillMBBs) and restore (RestoreIdxes/
// RestoreMBBs) points plus spill weights for the new intervals.
// NOTE(review): this excerpt elides many original lines (the vrm/isLoad
// parameters, several guards/'continue's, the trailing else branches, and
// some closing braces) — confirm control flow against the full source.
1860 void LiveIntervals::
1861 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1862 LiveInterval::Ranges::const_iterator &I,
1863 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1864 unsigned Slot, int LdSlot,
1865 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1867 const TargetRegisterClass* rc,
1868 SmallVector<int, 4> &ReMatIds,
1869 const MachineLoopInfo *loopInfo,
1870 BitVector &SpillMBBs,
1871 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1872 BitVector &RestoreMBBs,
1873 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1874 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1875 std::vector<LiveInterval*> &NewLIs) {
1876 bool AllCanFold = true;
1877 unsigned NewVReg = 0;
1878 LiveIndex start = getBaseIndex(I->start);
1879 LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1881 // First collect all the def / use in this live range that will be rewritten.
1882 // Make sure they are sorted according to instruction index.
1883 std::vector<RewriteInfo> RewriteMIs;
1884 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1885 re = mri_->reg_end(); ri != re; ) {
1886 MachineInstr *MI = &*ri;
1887 MachineOperand &O = ri.getOperand();
1889 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1890 LiveIndex index = getInstructionIndex(MI);
// Only instructions inside this live range are rewritten.
1891 if (index < start || index >= end)
1895 // Must be defined by an implicit def. It should not be spilled. Note,
1896 // this is for correctness reason. e.g.
1897 // 8   %reg1024<def> = IMPLICIT_DEF
1898 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1899 // The live range [12, 14) are not part of the r1024 live interval since
1900 // it's defined by an implicit def. It will not conflict with live
1901 // interval of r1025. Now suppose both registers are spilled, you can
1902 // easily see a situation where both registers are reloaded before
1903 // the INSERT_SUBREG and both target registers that would overlap.
1905 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1907 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1909 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1910 // Now rewrite the defs and uses.
1911 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1912 RewriteInfo &rwi = RewriteMIs[i];
1914 LiveIndex index = rwi.Index;
1915 bool MIHasUse = rwi.HasUse;
1916 bool MIHasDef = rwi.HasDef;
1917 MachineInstr *MI = rwi.MI;
1918 // If MI def and/or use the same register multiple times, then there
1919 // are multiple entries.
1920 unsigned NumUses = MIHasUse;
1921 while (i != e && RewriteMIs[i].MI == MI) {
1922 assert(RewriteMIs[i].Index == index);
1923 bool isUse = RewriteMIs[i].HasUse;
1924 if (isUse) ++NumUses;
1926 MIHasDef |= RewriteMIs[i].HasDef;
1929 MachineBasicBlock *MBB = MI->getParent();
1931 if (ImpUse && MI != ReMatDefMI) {
1932 // Re-matting an instruction with virtual register use. Update the
1933 // register interval's spill weight to HUGE_VALF to prevent it from
1935 LiveInterval &ImpLi = getInterval(ImpUse);
1936 ImpLi.weight = HUGE_VALF;
1939 unsigned MBBId = MBB->getNumber();
1940 unsigned ThisVReg = 0;
// When splitting, reuse the vreg already assigned to this MBB if any.
1942 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1943 if (NVI != MBBVRegsMap.end()) {
1944 ThisVReg = NVI->second;
1951 // It's better to start a new interval to avoid artificially
1952 // extend the new interval.
1953 if (MIHasDef && !MIHasUse) {
1954 MBBVRegsMap.erase(MBB->getNumber());
1960 bool IsNew = ThisVReg == 0;
1962 // This ends the previous live interval. If all of its def / use
1963 // can be folded, give it a low spill weight.
1964 if (NewVReg && TrySplit && AllCanFold) {
1965 LiveInterval &nI = getOrCreateInterval(NewVReg);
1972 bool HasDef = false;
1973 bool HasUse = false;
1974 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1975 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1976 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1977 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1978 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1979 if (!HasDef && !HasUse)
1982 AllCanFold &= CanFold;
1984 // Update weight of spill interval.
1985 LiveInterval &nI = getOrCreateInterval(NewVReg);
1987 // The spill weight is now infinity as it cannot be spilled again.
1988 nI.weight = HUGE_VALF;
1992 // Keep track of the last def and first use in each MBB.
1994 if (MI != ReMatOrigDefMI || !CanDelete) {
1995 bool HasKill = false;
1997 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1999 // If this is a two-address code, then this index starts a new VNInfo.
2000 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2002 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2004 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2005 SpillIdxes.find(MBBId);
2007 if (SII == SpillIdxes.end()) {
2008 std::vector<SRInfo> S;
2009 S.push_back(SRInfo(index, NewVReg, true));
2010 SpillIdxes.insert(std::make_pair(MBBId, S));
2011 } else if (SII->second.back().vreg != NewVReg) {
2012 SII->second.push_back(SRInfo(index, NewVReg, true));
2013 } else if (index > SII->second.back().index) {
2014 // If there is an earlier def and this is a two-address
2015 // instruction, then it's not possible to fold the store (which
2016 // would also fold the load).
2017 SRInfo &Info = SII->second.back();
2019 Info.canFold = !HasUse;
2021 SpillMBBs.set(MBBId);
2022 } else if (SII != SpillIdxes.end() &&
2023 SII->second.back().vreg == NewVReg &&
2024 index > SII->second.back().index) {
2025 // There is an earlier def that's not killed (must be two-address).
2026 // The spill is no longer needed.
2027 SII->second.pop_back();
2028 if (SII->second.empty()) {
2029 SpillIdxes.erase(MBBId);
2030 SpillMBBs.reset(MBBId);
2037 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2038 SpillIdxes.find(MBBId);
2039 if (SII != SpillIdxes.end() &&
2040 SII->second.back().vreg == NewVReg &&
2041 index > SII->second.back().index)
2042 // Use(s) following the last def, it's not safe to fold the spill.
2043 SII->second.back().canFold = false;
2044 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2045 RestoreIdxes.find(MBBId);
2046 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2047 // If we are splitting live intervals, only fold if it's the first
2048 // use and there isn't another use later in the MBB.
2049 RII->second.back().canFold = false;
2051 // Only need a reload if there isn't an earlier def / use.
2052 if (RII == RestoreIdxes.end()) {
2053 std::vector<SRInfo> Infos;
2054 Infos.push_back(SRInfo(index, NewVReg, true));
2055 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2057 RII->second.push_back(SRInfo(index, NewVReg, true));
2059 RestoreMBBs.set(MBBId);
2063 // Update spill weight.
2064 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2065 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2068 if (NewVReg && TrySplit && AllCanFold) {
2069 // If all of its def / use can be folded, give it a low spill weight.
2070 LiveInterval &nI = getOrCreateInterval(NewVReg);
2075 bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
2076 unsigned vr, BitVector &RestoreMBBs,
2077 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2078 if (!RestoreMBBs[Id])
2080 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2081 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2082 if (Restores[i].index == index &&
2083 Restores[i].vreg == vr &&
2084 Restores[i].canFold)
2089 void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
2090 unsigned vr, BitVector &RestoreMBBs,
2091 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2092 if (!RestoreMBBs[Id])
2094 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2095 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2096 if (Restores[i].index == index && Restores[i].vreg)
2097 Restores[i].index = LiveIndex();
2100 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2101 /// spilled and create empty intervals for their uses.
// NOTE(review): this listing is elided (e.g. the return-type line and the
// def/use branch and iterator-advance lines are missing); the comments
// below describe only what is visible here.
2103 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2104 const TargetRegisterClass* rc,
2105 std::vector<LiveInterval*> &NewLIs) {
// Walk every operand that references li.reg.  The iterator is not advanced
// in the for-header because instructions may be erased during the walk.
2106 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2107 re = mri_->reg_end(); ri != re; ) {
2108 MachineOperand &O = ri.getOperand();
2109 MachineInstr *MI = &*ri;
// A def of this register must itself be an IMPLICIT_DEF; delete the
// instruction and drop it from the slot-index and VirtRegMap bookkeeping.
2112 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2113 "Register def was not rewritten?");
2114 RemoveMachineInstrFromMaps(MI);
2115 vrm.RemoveMachineInstrFromMaps(MI);
2116 MI->eraseFromParent();
2118 // This must be a use of an implicit_def so it's not part of the live
2119 // interval. Create a new empty live interval for it.
2120 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2121 unsigned NewVReg = mri_->createVirtualRegister(rc);
// Mark the fresh vreg as implicitly defined and give it an (empty) interval
// that is reported back to the caller through NewLIs.
2123 vrm.setIsImplicitlyDefined(NewVReg);
2124 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that still names li.reg to the
// fresh implicitly-defined vreg.
2125 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2126 MachineOperand &MO = MI->getOperand(i);
2127 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simple "spill everywhere" strategy (enabled by
// -fast-spill): assign li.reg one stack slot, then for every instruction
// touching li.reg either fold the memory operand directly or rewrite the
// operands to a fresh one-instruction vreg with a restore before / spill
// after the instruction.  Returns the list of new intervals created.
// NOTE(review): several interior lines of this listing are elided (loop
// closers, DEBUG braces, the HasUse/HasDef branch structure).
2136 std::vector<LiveInterval*> LiveIntervals::
2137 addIntervalsForSpillsFast(const LiveInterval &li,
2138 const MachineLoopInfo *loopInfo,
// One stack slot is shared by every split vreg created below.
2140 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2142 std::vector<LiveInterval*> added;
// A weight of HUGE_VALF marks an already-spilled interval; spilling it
// again would loop forever.
2144 assert(li.weight != HUGE_VALF &&
2145 "attempt to spill already spilled interval!");
2148 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2153 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Re-scan from the beginning after each rewrite: operand rewriting and
// folding invalidate the reg_iterator.
2155 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2156 while (RI != mri_->reg_end()) {
2157 MachineInstr* MI = &*RI;
// Collect all operand indices of MI that name li.reg, and whether the
// instruction reads and/or writes the register.
2159 SmallVector<unsigned, 2> Indices;
2160 bool HasUse = false;
2161 bool HasDef = false;
2163 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2164 MachineOperand& mop = MI->getOperand(i);
2165 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2167 HasUse |= MI->getOperand(i).isUse();
2168 HasDef |= MI->getOperand(i).isDef();
2170 Indices.push_back(i);
// First choice: fold the stack slot straight into the instruction.  Only
// if that fails do we materialize a new vreg with explicit reload/spill.
2173 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2174 Indices, true, slot, li.reg)) {
2175 unsigned NewVReg = mri_->createVirtualRegister(rc);
2177 vrm.assignVirt2StackSlot(NewVReg, slot);
2179 // create a new register for this spill
2180 LiveInterval &nI = getOrCreateInterval(NewVReg);
2182 // the spill weight is now infinity as it
2183 // cannot be spilled again
2184 nI.weight = HUGE_VALF;
2186 // Rewrite register operands to use the new vreg.
2187 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2188 E = Indices.end(); I != E; ++I) {
2189 MI->getOperand(*I).setReg(NewVReg);
// The new vreg lives only across this instruction, so any use is a kill.
2191 if (MI->getOperand(*I).isUse())
2192 MI->getOperand(*I).setIsKill(true);
2195 // Fill in the new live interval.
2196 LiveIndex index = getInstructionIndex(MI);
// Use side: live from the load slot to the use slot, and ask the spiller
// to emit a restore before MI.
2198 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2199 nI.getNextValue(LiveIndex(), 0, false,
2200 getVNInfoAllocator()));
2201 DEBUG(errs() << " +" << LR);
2203 vrm.addRestorePoint(NewVReg, MI);
// Def side: live from the def slot to the store slot, and ask the spiller
// to emit a spill after MI.
2206 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2207 nI.getNextValue(LiveIndex(), 0, false,
2208 getVNInfoAllocator()));
2209 DEBUG(errs() << " +" << LR);
2211 vrm.addSpillPoint(NewVReg, true, MI);
2214 added.push_back(&nI);
2217 errs() << "\t\t\t\tadded new interval: ";
// Restart the scan: the use list of li.reg changed above.
2224 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - The main spilling driver used by the linear-scan
// allocator.  Phases visible in this listing:
//   1. analyze each value number for rematerializability,
//   2. assign a stack slot if any value must live in memory,
//   3. rewrite all defs/uses via rewriteInstructionsForSpills (recording
//      per-MBB spill/restore candidates),
//   4. try to fold the recorded spills and restores into instructions,
//   5. finalize weights and kill flags on the new intervals.
// NOTE(review): many interior lines of this listing are elided (loop
// closers, else-branches, DEBUG blocks); comments describe only the
// visible code.
2230 std::vector<LiveInterval*> LiveIntervals::
2231 addIntervalsForSpills(const LiveInterval &li,
2232 SmallVectorImpl<LiveInterval*> &SpillIs,
2233 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// The -fast-spill flag routes everything through the simple strategy.
2235 if (EnableFastSpilling)
2236 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2238 assert(li.weight != HUGE_VALF &&
2239 "attempt to spill already spilled interval!");
2242 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2243 li.print(errs(), tri_);
2247 // Each bit specifies whether a spill is required in the MBB.
2248 BitVector SpillMBBs(mf_->getNumBlockIDs());
2249 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2250 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2251 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2252 DenseMap<unsigned,unsigned> MBBVRegsMap;
2253 std::vector<LiveInterval*> NewLIs;
2254 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo::id.
2256 unsigned NumValNums = li.getNumValNums();
2257 SmallVector<MachineInstr*, 4> ReMatDefs;
2258 ReMatDefs.resize(NumValNums, NULL);
2259 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2260 ReMatOrigDefs.resize(NumValNums, NULL);
2261 SmallVector<int, 4> ReMatIds;
2262 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2263 BitVector ReMatDelete(NumValNums);
2264 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2266 // Spilling a split live interval. It cannot be split any further. Also,
2267 // it's guaranteed to be a single val# / range interval.
2268 if (vrm.getPreSplitReg(li.reg)) {
2269 vrm.setIsSplitFromReg(li.reg, 0);
2270 // Unset the split kill marker on the last use.
2271 LiveIndex KillIdx = vrm.getKillPoint(li.reg);
2272 if (KillIdx != LiveIndex()) {
2273 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2274 assert(KillMI && "Last use disappeared?");
2275 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2276 assert(KillOp != -1 && "Last use disappeared?");
2277 KillMI->getOperand(KillOp).setIsKill(false);
2279 vrm.removeKillPoint(li.reg);
// A pre-split interval already has its remat info and stack slot recorded
// in the VirtRegMap; recover them instead of recomputing.
2280 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2281 Slot = vrm.getStackSlot(li.reg);
2282 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2283 MachineInstr *ReMatDefMI = DefIsReMat ?
2284 vrm.getReMaterializedMI(li.reg) : NULL;
2286 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2287 bool isLoad = isLoadSS ||
2288 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2289 bool IsFirstRange = true;
2290 for (LiveInterval::Ranges::const_iterator
2291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2292 // If this is a split live interval with multiple ranges, it means there
2293 // are two-address instructions that re-defined the value. Only the
2294 // first def can be rematerialized!
2296 // Note ReMatOrigDefMI has already been deleted.
2297 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2298 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2299 false, vrm, rc, ReMatIds, loopInfo,
2300 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2301 MBBVRegsMap, NewLIs);
// Non-first ranges: no rematerialization, plain stack reload/spill.
2303 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2304 Slot, 0, false, false, false,
2305 false, vrm, rc, ReMatIds, loopInfo,
2306 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2307 MBBVRegsMap, NewLIs);
2309 IsFirstRange = false;
2312 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Splitting is only attempted when the interval spans more than one MBB.
2316 bool TrySplit = !intervalIsInOneMBB(li);
2319 bool NeedStackSlot = false;
// Phase 1: decide, per value number, whether its def can be
// rematerialized and whether the original def may be deleted.
2320 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2322 const VNInfo *VNI = *i;
2323 unsigned VN = VNI->id;
2324 if (VNI->isUnused())
2325 continue; // Dead val#.
2326 // Is the def for the val# rematerializable?
2327 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2328 ? getInstructionFromIndex(VNI->def) : 0;
2330 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2331 // Remember how to remat the def of this val#.
2332 ReMatOrigDefs[VN] = ReMatDefMI;
2333 // Original def may be modified so we have to make a copy here.
2334 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2335 CloneMIs.push_back(Clone);
2336 ReMatDefs[VN] = Clone;
2338 bool CanDelete = true;
2339 if (VNI->hasPHIKill()) {
2340 // A kill is a phi node, not all of its uses can be rematerialized.
2341 // It must not be deleted.
2343 // Need a stack slot if there is any live range where uses cannot be
2345 NeedStackSlot = true;
2348 ReMatDelete.set(VN);
2350 // Need a stack slot if there is any live range where uses cannot be
2352 NeedStackSlot = true;
2356 // One stack slot per live interval.
2357 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2358 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2359 Slot = vrm.assignVirt2StackSlot(li.reg);
2361 // This case only occurs when the prealloc splitter has already assigned
2362 // a stack slot to this vreg.
2364 Slot = vrm.getStackSlot(li.reg);
2367 // Create new intervals and rewrite defs and uses.
2368 for (LiveInterval::Ranges::const_iterator
2369 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2370 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2371 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2372 bool DefIsReMat = ReMatDefMI != NULL;
2373 bool CanDelete = ReMatDelete[I->valno->id];
2375 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2376 bool isLoad = isLoadSS ||
2377 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2378 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2379 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2380 CanDelete, vrm, rc, ReMatIds, loopInfo,
2381 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2382 MBBVRegsMap, NewLIs);
2385 // Insert spills / restores if we are splitting.
2387 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Phase 2: walk the recorded spill points (one bit per MBB) and try to
// fold each store into the defining instruction.
2391 SmallPtrSet<LiveInterval*, 4> AddedKill;
2392 SmallVector<unsigned, 2> Ops;
2393 if (NeedStackSlot) {
2394 int Id = SpillMBBs.find_first();
2396 std::vector<SRInfo> &spills = SpillIdxes[Id];
2397 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2398 LiveIndex index = spills[i].index;
2399 unsigned VReg = spills[i].vreg;
2400 LiveInterval &nI = getOrCreateInterval(VReg);
2401 bool isReMat = vrm.isReMaterialized(VReg);
2402 MachineInstr *MI = getInstructionFromIndex(index);
2403 bool CanFold = false;
2404 bool FoundUse = false;
2406 if (spills[i].canFold) {
// Gather the operand indices of VReg on MI to attempt folding.
2408 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2409 MachineOperand &MO = MI->getOperand(j);
2410 if (!MO.isReg() || MO.getReg() != VReg)
2417 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2418 RestoreMBBs, RestoreIdxes))) {
2419 // MI has two-address uses of the same register. If the use
2420 // isn't the first and only use in the BB, then we can't fold
2421 // it. FIXME: Move this to rewriteInstructionsForSpills.
2428 // Fold the store into the def if possible.
2429 bool Folded = false;
2430 if (CanFold && !Ops.empty()) {
2431 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2434 // Also folded uses, do not issue a load.
2435 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2436 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2438 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2442 // Otherwise tell the spiller to issue a spill.
// The def is a kill for the new interval iff its last range ends exactly
// at this instruction's store slot.
2444 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2445 bool isKill = LR->end == getStoreIndex(index);
2446 if (!MI->registerDefIsDead(nI.reg))
2447 // No need to spill a dead def.
2448 vrm.addSpillPoint(VReg, isKill, MI);
2450 AddedKill.insert(&nI);
2453 Id = SpillMBBs.find_next(Id);
// Phase 3: walk the recorded restore points and try to fold each reload
// (or rematerializable def) into the using instruction.
2457 int Id = RestoreMBBs.find_first();
2459 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2460 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2461 LiveIndex index = restores[i].index;
// An invalid index marks a restore already erased by eraseRestoreInfo.
2462 if (index == LiveIndex())
2464 unsigned VReg = restores[i].vreg;
2465 LiveInterval &nI = getOrCreateInterval(VReg);
2466 bool isReMat = vrm.isReMaterialized(VReg);
2467 MachineInstr *MI = getInstructionFromIndex(index);
2468 bool CanFold = false;
2470 if (restores[i].canFold) {
2472 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2473 MachineOperand &MO = MI->getOperand(j);
2474 if (!MO.isReg() || MO.getReg() != VReg)
2478 // If this restore were to be folded, it would have been folded
2487 // Fold the load into the use if possible.
2488 bool Folded = false;
2489 if (CanFold && !Ops.empty()) {
// Plain stack reload: fold the stack slot directly.
2491 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def: if it is itself a load, try folding that instead.
2493 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2495 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2496 // If the rematerializable def is a load, also try to fold it.
2497 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2498 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2499 Ops, isLoadSS, LdSlot, VReg);
2501 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2503 // Re-matting an instruction with virtual register use. Add the
2504 // register as an implicit use on the use MI and update the register
2505 // interval's spill weight to HUGE_VALF to prevent it from being
2507 LiveInterval &ImpLi = getInterval(ImpUse);
2508 ImpLi.weight = HUGE_VALF;
2509 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2514 // If folding is not possible / failed, then tell the spiller to issue a
2515 // load / rematerialization for us.
2517 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2519 vrm.addRestorePoint(VReg, MI);
2521 Id = RestoreMBBs.find_next(Id);
2524 // Finalize intervals: add kills, finalize spill weights, and filter out
2526 std::vector<LiveInterval*> RetNewLIs;
2527 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2528 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's approximate size.
2530 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2531 if (!AddedKill.count(LI)) {
// Mark the last use of the interval as a kill, unless it is tied to a
// def operand (two-address), and record the kill point in the VirtRegMap.
2532 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2533 LiveIndex LastUseIdx = getBaseIndex(LR->end);
2534 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2535 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2536 assert(UseIdx != -1);
2537 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2538 LastUse->getOperand(UseIdx).setIsKill();
2539 vrm.addKillPoint(LI->reg, LastUseIdx);
2542 RetNewLIs.push_back(LI);
2546 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2550 /// hasAllocatableSuperReg - Return true if the specified physical register has
2551 /// any super register that's allocatable.
2552 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Scan the target's super-register list of Reg; a hit requires the
// super-register to be allocatable AND to have a live interval.
2553 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2554 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): the 'return true;' / 'return false;' lines are elided
// from this listing.
2559 /// getRepresentativeReg - Find the largest super register of the specified
2560 /// physical register.
2561 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2562 // Find the largest super-register that is allocatable.
2563 unsigned BestReg = Reg;
2564 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2565 unsigned SuperReg = *AS;
// Candidate: a super-register that has an interval but no allocatable
// super-register of its own, i.e. the top of the allocatable chain.
2566 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): the body updating BestReg and the final 'return BestReg;'
// are elided from this listing.
2574 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2575 /// specified interval that conflicts with the specified physical register.
2576 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2577 unsigned PhysReg) const {
2578 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest allocatable
// super-register) interval of PhysReg, not against PhysReg itself.
2579 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Walk every def/use of li.reg and test whether the phys-reg interval is
// live at that instruction's index.
2580 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2581 E = mri_->reg_end(); I != E; ++I) {
2582 MachineOperand &O = I.getOperand();
2583 MachineInstr *MI = O.getParent();
2584 LiveIndex Index = getInstructionIndex(MI);
// NOTE(review): the '++NumConflicts;' line after this test is elided
// from this listing.
2585 if (pli.liveAt(Index))
2588 return NumConflicts;
2591 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2592 /// around all defs and uses of the specified interval. Return true if it
2593 /// was able to cut its interval.
// NOTE(review): several interior lines are elided (the SeenMIs.insert /
// continue after the count check, the 'Cut = true' path, and the
// std::string msg declaration feeding the ostream below).
2594 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2595 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest allocatable) super-register so every
// aliasing sub-register is covered by one emergency spill.
2596 unsigned SpillReg = getRepresentativeReg(PhysReg);
2598 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2599 // If there are registers which alias PhysReg, but which are not a
2600 // sub-register of the chosen representative super register. Assert
2601 // since we can't handle it yet.
2602 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2603 tri_->isSuperRegister(*AS, SpillReg));
2606 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs prevents processing the same instruction once per operand that
// mentions li.reg.
2607 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2608 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2609 E = mri_->reg_end(); I != E; ++I) {
2610 MachineOperand &O = I.getOperand();
2611 MachineInstr *MI = O.getParent();
2612 if (SeenMIs.count(MI))
2615 LiveIndex Index = getInstructionIndex(MI);
2616 if (pli.liveAt(Index)) {
// The phys reg is live across this instruction: record an emergency spill
// and carve the instruction's slot span out of the phys-reg interval.
2617 vrm.addEmergencySpill(SpillReg, MI);
2618 LiveIndex StartIdx = getLoadIndex(Index);
2619 LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
2620 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2621 pli.removeRange(StartIdx, EndIdx);
// The span crosses a live-range boundary: spilling around it is not
// possible, so this is a fatal allocation failure.
2625 raw_string_ostream Msg(msg);
2626 Msg << "Ran out of registers during register allocation!";
2627 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2628 Msg << "\nPlease check your inline asm statement for invalid "
2629 << "constraints:\n";
2630 MI->print(Msg, tm_);
2632 llvm_report_error(Msg.str());
// Also trim every sub-register interval that is live at this index.
2634 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2635 if (!hasInterval(*AS))
2637 LiveInterval &spli = getInterval(*AS);
2638 if (spli.liveAt(Index))
2639 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2646 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2647 MachineInstr* startInst) {
2648 LiveInterval& Interval = getOrCreateInterval(reg);
2649 VNInfo* VN = Interval.getNextValue(
2650 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2651 startInst, true, getVNInfoAllocator());
2652 VN->setHasPHIKill(true);
2653 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2655 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2656 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2657 Interval.addRange(LR);