1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
// NOTE(review): this listing has elided lines (original line numbers skip);
// compare against upstream before editing.
48 // Hidden options for help debugging.
// Disables rematerialization during spilling (debug aid).
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
// Enables the fast (less precise) spilling path.
52 static cl::opt<bool> EnableFastSpilling("fast-spill",
53 cl::init(false), cl::Hidden);
// Enables the early-coalescing pass over PHI join copies.
55 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
// Caps how many early coalescings are performed; -1 means unlimited.
57 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
58 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats).
60 STATISTIC(numIntervals , "Number of original intervals");
61 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
62 STATISTIC(numSplits , "Number of intervals split");
63 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Pass identification and static registration with the pass registry.
65 char LiveIntervals::ID = 0;
66 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and preserves.
// Requires AliasAnalysis and LiveVariables, and requires that PHI elimination
// and two-address lowering have already run; preserves loop/dominator info.
// NOTE(review): the closing brace is elided in this listing.
68 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
70 AU.addRequired<AliasAnalysis>();
71 AU.addPreserved<AliasAnalysis>();
72 AU.addPreserved<LiveVariables>();
73 AU.addRequired<LiveVariables>();
74 AU.addPreservedID(MachineLoopInfoID);
75 AU.addPreservedID(MachineDominatorsID);
78 AU.addPreservedID(PHIEliminationID);
79 AU.addRequiredID(PHIEliminationID);
82 AU.addRequiredID(TwoAddressInstructionPassID);
// Chain to the base class so common MachineFunctionPass usage is recorded.
83 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: live intervals, terminator
// gaps, PHI-join copy records, the VNInfo allocator arena, and any cloned
// machine instructions created during analysis.
// NOTE(review): loop bodies / intermediate lines are elided in this listing
// (e.g. the delete of each interval and the CloneMIs pop_back).
86 void LiveIntervals::releaseMemory() {
87 // Free the live intervals themselves.
88 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
89 E = r2iMap_.end(); I != E; ++I)
97 terminatorGaps.clear();
98 phiJoinCopies.clear();
100 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
101 VNInfoAllocator.Reset();
// Delete machine instructions that were cloned during analysis.
102 while (!CloneMIs.empty()) {
103 MachineInstr *MI = CloneMIs.back();
105 mf_->DeleteMachineInstr(MI);
// CanTurnIntoImplicitDef - Returns whether the use of Reg at operand OpIdx
// of MI is a plain copy-like use (register move, SUBREG_TO_REG source, or
// EXTRACT_SUBREG source), so the instruction can be rewritten into an
// IMPLICIT_DEF when its source is itself implicitly defined.
// NOTE(review): the return statements are elided in this listing.
109 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
110 unsigned OpIdx, const TargetInstrInfo *tii_){
111 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
112 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
116 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
118 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
123 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
124 /// there is one implicit_def for each use. Add isUndef marker to
125 /// implicit_def defs and their uses.
// NOTE(review): many interior lines (braces, continue/break statements,
// marker-setting code) are elided in this listing; compare with upstream
// before editing.
126 void LiveIntervals::processImplicitDefs() {
// Registers currently known to be implicitly defined, and the defining MIs.
127 SmallSet<unsigned, 8> ImpDefRegs;
128 SmallVector<MachineInstr*, 8> ImpDefMIs;
129 MachineBasicBlock *Entry = mf_->begin();
130 SmallPtrSet<MachineBasicBlock*,16> Visited;
// Walk blocks in depth-first order from the entry block.
131 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
132 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
134 MachineBasicBlock *MBB = *DFI;
135 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
137 MachineInstr *MI = &*I;
// Record every IMPLICIT_DEF; for physregs, its sub-registers are
// implicitly defined as well.
139 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
140 unsigned Reg = MI->getOperand(0).getReg();
141 ImpDefRegs.insert(Reg);
142 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
143 for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
144 ImpDefRegs.insert(*SS);
146 ImpDefMIs.push_back(MI);
// An INSERT_SUBREG whose inserted value is implicitly defined is an
// identity copy; erase it.
150 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
151 MachineOperand &MO = MI->getOperand(2);
152 if (ImpDefRegs.count(MO.getReg())) {
153 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
154 // This is an identity copy, eliminate it now.
156 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
159 MI->eraseFromParent();
// Scan uses of implicitly-defined registers in this instruction.
164 bool ChangedToImpDef = false;
165 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
166 MachineOperand& MO = MI->getOperand(i);
167 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
169 unsigned Reg = MO.getReg();
172 if (!ImpDefRegs.count(Reg))
174 // Use is a copy, just turn it into an implicit_def.
175 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
176 bool isKill = MO.isKill();
177 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip every operand except operand 0 (the def).
178 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
179 MI->RemoveOperand(j);
181 ImpDefRegs.erase(Reg);
182 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
185 ChangedToImpDef = true;
// If this use kills the register (or is tied to a def), the value
// stops being "implicitly defined" past this point — unless a later
// operand in the same instruction also uses it.
190 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
191 // Make sure other uses of
192 for (unsigned j = i+1; j != e; ++j) {
193 MachineOperand &MOJ = MI->getOperand(j);
194 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
197 ImpDefRegs.erase(Reg);
201 if (ChangedToImpDef) {
202 // Backtrack to process this new implicit_def.
// A normal instruction's defs end the implicit-def-ness of those regs.
205 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
206 MachineOperand& MO = MI->getOperand(i);
207 if (!MO.isReg() || !MO.isDef())
209 ImpDefRegs.erase(MO.getReg());
214 // Any outstanding liveout implicit_def's?
215 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
216 MachineInstr *MI = ImpDefMIs[i];
217 unsigned Reg = MI->getOperand(0).getReg();
218 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
219 !ImpDefRegs.count(Reg)) {
220 // Delete all "local" implicit_def's. That includes those which define
221 // physical registers since they cannot be liveout.
222 MI->eraseFromParent();
226 // If there are multiple defs of the same register and at least one
227 // is not an implicit_def, do not insert implicit_def's before the
230 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
231 DE = mri_->def_end(); DI != DE; ++DI) {
232 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
240 // The only implicit_def which we want to keep are those that are live
242 MI->eraseFromParent();
// Rewrite remaining uses of the liveout implicit def.
244 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
245 UE = mri_->use_end(); UI != UE; ) {
246 MachineOperand &RMO = UI.getOperand();
247 MachineInstr *RMI = &*UI;
249 MachineBasicBlock *RMBB = RMI->getParent();
253 // Turn a copy use into an implicit_def.
254 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
255 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
257 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
258 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
259 RMI->RemoveOperand(j);
// Otherwise give the use its own fresh virtual register.
263 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
264 unsigned NewVReg = mri_->createVirtualRegister(RC);
// computeNumbering - Assign an index (LiveIndex) to every MachineInstr and
// MachineBasicBlock, leaving empty slots after each instruction and a
// "terminator gap" before each block's first terminator, then remap all
// previously-computed live ranges / value numbers to the new numbering.
// NOTE(review): numerous interior lines (closing braces, else branches,
// assert beginnings) are elided in this listing.
276 void LiveIntervals::computeNumbering() {
// Save the old maps so existing intervals can be remapped below.
277 Index2MiMap OldI2MI = i2miMap_;
278 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
284 terminatorGaps.clear();
285 phiJoinCopies.clear();
289 // Number MachineInstrs and MachineBasicBlocks.
290 // Initialize MBB indexes to a sentinel.
291 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
292 std::make_pair(LiveIndex(),LiveIndex()));
295 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
297 LiveIndex StartIdx = MIIndex;
299 // Insert an empty slot at the beginning of each block.
300 MIIndex = getNextIndex(MIIndex);
301 i2miMap_.push_back(0);
303 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
306 if (I == MBB->getFirstTerminator()) {
307 // Leave a gap for before terminators, this is where we will point
309 LiveIndex tGap(true, MIIndex);
// NOTE(review): the "bool inserted =" prefix of this statement appears
// elided here; as written the insert result is discarded.
311 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
313 "Multiple 'first' terminators encountered during numbering.");
314 inserted = inserted; // Avoid compiler warning if assertions turned off.
315 i2miMap_.push_back(0);
317 MIIndex = getNextIndex(MIIndex);
// Map the instruction to its index (must be unique) and back.
320 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
321 assert(inserted && "multiple MachineInstr -> index mappings");
323 i2miMap_.push_back(I);
324 MIIndex = getNextIndex(MIIndex);
327 // Insert max(1, numdefs) empty slots after every instruction.
328 unsigned Slots = I->getDesc().getNumDefs();
332 MIIndex = getNextIndex(MIIndex);
333 i2miMap_.push_back(0);
// Blocks with no terminator still need a terminator gap at the end.
338 if (MBB->getFirstTerminator() == MBB->end()) {
339 // Leave a gap for before terminators, this is where we will point
341 LiveIndex tGap(true, MIIndex);
343 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
345 "Multiple 'first' terminators encountered during numbering.");
346 inserted = inserted; // Avoid compiler warning if assertions turned off.
347 i2miMap_.push_back(0);
349 MIIndex = getNextIndex(MIIndex);
352 // Set the MBB2IdxMap entry for this MBB.
353 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
354 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
357 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If this is a renumbering, remap every existing range to the new indices.
359 if (!OldI2MI.empty())
360 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
361 for (LiveInterval::iterator LI = OI->second->begin(),
362 LE = OI->second->end(); LI != LE; ++LI) {
364 // Remap the start index of the live range to the corresponding new
365 // number, or our best guess at what it _should_ correspond to if the
366 // original instruction has been erased. This is either the following
367 // instruction or its predecessor.
368 unsigned index = LI->start.getVecIndex();
369 LiveIndex::Slot offset = LI->start.getSlot();
370 if (LI->start.isLoad()) {
371 std::vector<IdxMBBPair>::const_iterator I =
372 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
373 // Take the pair containing the index
374 std::vector<IdxMBBPair>::const_iterator J =
375 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
377 LI->start = getMBBStartIdx(J->second);
379 LI->start = LiveIndex(
380 LiveIndex(mi2iMap_[OldI2MI[index]]),
381 (LiveIndex::Slot)offset);
384 // Remap the ending index in the same way that we remapped the start,
385 // except for the final step where we always map to the immediately
386 // following instruction.
387 index = (getPrevSlot(LI->end)).getVecIndex();
388 offset = LI->end.getSlot();
389 if (LI->end.isLoad()) {
390 // VReg dies at end of block.
391 std::vector<IdxMBBPair>::const_iterator I =
392 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
395 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Otherwise skip over erased instructions to the next live one.
397 unsigned idx = index;
398 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
400 if (index != OldI2MI.size())
402 LiveIndex(mi2iMap_[OldI2MI[index]],
403 (idx == index ? offset : LiveIndex::LOAD));
406 LiveIndex(LiveIndex::NUM * i2miMap_.size());
// Remap value numbers (defs and kills) of each interval.
410 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
411 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
414 // Remap the VNInfo def index, which works the same as the
415 // start indices above. VN's with special sentinel defs
416 // don't need to be remapped.
417 if (vni->isDefAccurate() && !vni->isUnused()) {
418 unsigned index = vni->def.getVecIndex();
419 LiveIndex::Slot offset = vni->def.getSlot();
420 if (vni->def.isLoad()) {
421 std::vector<IdxMBBPair>::const_iterator I =
422 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
423 // Take the pair containing the index
424 std::vector<IdxMBBPair>::const_iterator J =
425 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
427 vni->def = getMBBStartIdx(J->second);
429 vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
433 // Remap the VNInfo kill indices, which works the same as
434 // the end indices above.
435 for (size_t i = 0; i < vni->kills.size(); ++i) {
436 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
437 LiveIndex::Slot offset = vni->kills[i].getSlot();
439 if (vni->kills[i].isLoad()) {
// NOTE(review): asserting on a string literal is always true, so this
// assert can never fire; almost certainly intended to be
// assert(false && "Value killed at a load slot."). Not fixable safely
// here because surrounding lines are elided.
440 assert("Value killed at a load slot.");
441 /*std::vector<IdxMBBPair>::const_iterator I =
442 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
445 vni->kills[i] = getMBBEndIdx(I->second);*/
// PHI kills point at a terminator gap; look up the new gap index.
447 if (vni->kills[i].isPHIIndex()) {
448 std::vector<IdxMBBPair>::const_iterator I =
449 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
451 vni->kills[i] = terminatorGaps[I->second];
453 assert(OldI2MI[index] != 0 &&
454 "Kill refers to instruction not present in index maps.");
455 vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
459 unsigned idx = index;
460 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
462 if (index != OldI2MI.size())
463 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
464 (idx == index ? offset : 0);
466 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// scaleNumbering - Multiply every index in the analysis (block boundaries,
// terminator gaps, interval ranges, and instruction numberings) by `factor`,
// opening space for new instructions without a full renumbering.
// NOTE(review): some interior lines (e.g. the Idx2MBBMap.clear() and loop
// closers) are elided in this listing.
474 void LiveIntervals::scaleNumbering(int factor) {
476 // * scale MBB begin and end points
477 // * scale all ranges.
478 // * Update VNI structures.
479 // * Scale instruction numberings
481 // Scale the MBB indices.
483 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
484 MBB != MBBE; ++MBB) {
485 std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
486 mbbIndices.first = mbbIndices.first.scale(factor);
487 mbbIndices.second = mbbIndices.second.scale(factor);
488 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
490 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
492 // Scale terminator gaps.
493 for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
494 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
496 terminatorGaps[TGI->first] = TGI->second.scale(factor);
499 // Scale the intervals.
500 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
501 LI->second->scaleNumbering(factor);
504 // Scale MachineInstrs.
// Rebuild mi2iMap_ from a copy, tracking the largest new slot so the
// reverse (index -> instruction) map can be resized to fit.
505 Mi2IndexMap oldmi2iMap = mi2iMap_;
506 LiveIndex highestSlot;
507 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
509 LiveIndex newSlot = MI->second.scale(factor);
510 mi2iMap_[MI->first] = newSlot;
511 highestSlot = std::max(highestSlot, newSlot);
514 unsigned highestVIndex = highestSlot.getVecIndex();
516 i2miMap_.resize(highestVIndex + 1);
517 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
519 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
// NOTE(review): header comment is misleading — this pass computes live
// intervals; it does not itself allocate registers.
525 /// runOnMachineFunction - Register allocate the whole function
527 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// NOTE(review): the `mf_ = &fn;` assignment appears elided in this listing;
// mri_ is read from mf_ on the next line, so it must precede this point.
529 mri_ = &mf_->getRegInfo();
530 tm_ = &fn.getTarget();
531 tri_ = tm_->getRegisterInfo();
532 tii_ = tm_->getInstrInfo();
533 aa_ = &getAnalysis<AliasAnalysis>();
534 lv_ = &getAnalysis<LiveVariables>();
535 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Normalize IMPLICIT_DEFs, then build intervals and optionally coalesce
// PHI-join copies early (numbering/interval computation lines elided here).
537 processImplicitDefs();
540 performEarlyCoalescing();
542 numIntervals += getNumIntervals();
548 /// print - Implement the dump method.
// Prints every live interval, one per line, to OS. The Module* parameter is
// unused (required by the Pass printing interface).
549 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
550 OS << "********** INTERVALS **********\n";
551 for (const_iterator I = begin(), E = end(); I != E; ++I) {
552 I->second->print(OS, tri_);
// printInstrs - Print every machine instruction in the function, prefixed
// with its LiveIndex, grouped by basic block (labeled with the IR block name).
559 void LiveIntervals::printInstrs(raw_ostream &OS) const {
560 OS << "********** MACHINEINSTRS **********\n";
562 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
563 mbbi != mbbe; ++mbbi) {
564 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
565 for (MachineBasicBlock::iterator mii = mbbi->begin(),
566 mie = mbbi->end(); mii != mie; ++mii) {
567 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug convenience wrapper (body elided in this listing;
// presumably forwards to printInstrs on a debug stream — TODO confirm).
572 void LiveIntervals::dumpInstrs() const {
576 /// conflictsWithPhysRegDef - Returns true if the specified register
577 /// is defined during the duration of the specified interval.
// Walks every instruction index covered by li's ranges (skipping erased
// slots) and checks whether any operand defines a register overlapping
// `reg`, mapping virtual registers through the VirtRegMap first.
// NOTE(review): several statements (continue/return, operand def check)
// are elided in this listing.
578 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
579 VirtRegMap &vrm, unsigned reg) {
580 for (LiveInterval::Ranges::const_iterator
581 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
582 for (LiveIndex index = getBaseIndex(I->start),
583 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
584 index = getNextIndex(index)) {
585 // skip deleted instructions
586 while (index != end && !getInstructionFromIndex(index))
587 index = getNextIndex(index);
588 if (index == end) break;
590 MachineInstr *MI = getInstructionFromIndex(index);
// Copies within the interval's own register are not conflicts.
591 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
592 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
593 if (SrcReg == li.reg || DstReg == li.reg)
595 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
596 MachineOperand& mop = MI->getOperand(i);
599 unsigned PhysReg = mop.getReg();
600 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers to their assigned physreg, if any.
602 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
603 if (!vrm.hasPhys(PhysReg))
605 PhysReg = vrm.getPhys(PhysReg);
607 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
616 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
617 /// it can check use as well.
// Instructions in JoinedCopies are ignored; when CheckUse is false, use
// operands are skipped and only defs are considered.
// NOTE(review): several statements (break on found MI, continue lines,
// the return) are elided in this listing.
618 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
619 unsigned Reg, bool CheckUse,
620 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
621 for (LiveInterval::Ranges::const_iterator
622 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
623 for (LiveIndex index = getBaseIndex(I->start),
624 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
625 index = getNextIndex(index)) {
626 // Skip deleted instructions.
627 MachineInstr *MI = 0;
628 while (index != end) {
629 MI = getInstructionFromIndex(index);
632 index = getNextIndex(index);
634 if (index == end) break;
636 if (JoinedCopies.count(MI))
638 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
639 MachineOperand& MO = MI->getOperand(i);
642 if (MO.isUse() && !CheckUse)
644 unsigned PhysReg = MO.getReg();
645 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
647 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: print a physical register's target name, or
// "%regN" for a virtual register, to errs().
// NOTE(review): the `else` introducing the second branch is elided here.
657 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
658 if (TargetRegisterInfo::isPhysicalRegister(reg))
659 errs() << tri_->getName(reg);
661 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a def of a virtual
// register at MIIdx. Handles three cases: the first def of the vreg, a
// redef from two-address lowering, and a redef from PHI elimination.
// NOTE(review): many interior lines (parameter lines, closing braces,
// else branches, early returns) are elided in this listing; compare with
// upstream before editing.
665 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
666 MachineBasicBlock::iterator mi,
670 LiveInterval &interval) {
672 errs() << "\t\tregister: ";
673 printRegName(interval.reg, tri_);
676 // Virtual registers may be defined multiple times (due to phi
677 // elimination and 2-addr elimination). Much of what we do only has to be
678 // done once for the vreg. We use an empty interval to detect the first
679 // time we see a vreg.
680 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
681 if (interval.empty()) {
682 // Get the Idx of the defining instructions.
683 LiveIndex defIndex = getDefIndex(MIIdx);
684 // Earlyclobbers move back one, so that they overlap the live range
686 if (MO.isEarlyClobber())
687 defIndex = getUseIndex(MIIdx);
// Remember the copy source (if any) on the value number so the
// coalescer can identify copy-defined values.
689 MachineInstr *CopyMI = NULL;
690 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
691 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
692 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
693 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
694 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
696 // Earlyclobbers move back one.
697 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
699 assert(ValNo->id == 0 && "First value in interval is not 0?");
701 // Loop over all of the blocks that the vreg is defined in. There are
702 // two cases we have to handle here. The most common case is a vreg
703 // whose lifetime is contained within a basic block. In this case there
704 // will be a single kill, in MBB, which comes after the definition.
705 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
706 // FIXME: what about dead vars?
708 if (vi.Kills[0] != mi)
709 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
710 else if (MO.isEarlyClobber())
711 // Earlyclobbers that die in this instruction move up one extra, to
712 // compensate for having the starting point moved back one. This
713 // gets them to overlap the live range of other outputs.
714 killIdx = getNextSlot(getNextSlot(defIndex));
716 killIdx = getNextSlot(defIndex);
718 // If the kill happens after the definition, we have an intra-block
720 if (killIdx > defIndex) {
721 assert(vi.AliveBlocks.empty() &&
722 "Shouldn't be alive across any blocks!");
723 LiveRange LR(defIndex, killIdx, ValNo);
724 interval.addRange(LR);
725 DEBUG(errs() << " +" << LR << "\n");
726 ValNo->addKill(killIdx);
731 // The other case we handle is when a virtual register lives to the end
732 // of the defining block, potentially live across some blocks, then is
733 // live into some number of blocks, but gets killed. Start by adding a
734 // range that goes from this definition to the end of the defining block.
735 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
736 DEBUG(errs() << " +" << NewLR);
737 interval.addRange(NewLR);
739 // Iterate over all of the blocks that the variable is completely
740 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
742 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
743 E = vi.AliveBlocks.end(); I != E; ++I) {
744 LiveRange LR(getMBBStartIdx(*I),
745 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
747 interval.addRange(LR);
748 DEBUG(errs() << " +" << LR);
751 // Finally, this virtual register is live from the start of any killing
752 // block to the 'use' slot of the killing instruction.
753 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
754 MachineInstr *Kill = vi.Kills[i];
756 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
757 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
758 interval.addRange(LR);
759 ValNo->addKill(killIdx);
760 DEBUG(errs() << " +" << LR);
764 // If this is the second time we see a virtual register definition, it
765 // must be due to phi elimination or two addr elimination. If this is
766 // the result of two address elimination, then the vreg is one of the
767 // def-and-use register operand.
768 if (mi->isRegTiedToUseOperand(MOIdx)) {
769 // If this is a two-address definition, then we have already processed
770 // the live range. The only problem is that we didn't realize there
771 // are actually two values in the live interval. Because of this we
772 // need to take the LiveRegion that defines this register and split it
774 assert(interval.containsOneValue());
775 LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
776 LiveIndex RedefIndex = getDefIndex(MIIdx);
777 if (MO.isEarlyClobber())
778 RedefIndex = getUseIndex(MIIdx);
780 const LiveRange *OldLR =
781 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
782 VNInfo *OldValNo = OldLR->valno;
784 // Delete the initial value, which should be short and continuous,
785 // because the 2-addr copy must be in the same MBB as the redef.
786 interval.removeRange(DefIndex, RedefIndex);
788 // Two-address vregs should always only be redefined once. This means
789 // that at this point, there should be exactly one value number in it.
790 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
792 // The new value number (#1) is defined by the instruction we claimed
794 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
795 false, // update at *
797 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
799 // Value#0 is now defined by the 2-addr instruction.
800 OldValNo->def = RedefIndex;
801 OldValNo->setCopy(0);
802 if (MO.isEarlyClobber())
803 OldValNo->setHasRedefByEC(true);
805 // Add the new live interval which replaces the range for the input copy.
806 LiveRange LR(DefIndex, RedefIndex, ValNo);
807 DEBUG(errs() << " replace range with " << LR);
808 interval.addRange(LR);
809 ValNo->addKill(RedefIndex);
811 // If this redefinition is dead, we need to add a dummy unit live
812 // range covering the def slot.
815 LiveRange(RedefIndex, MO.isEarlyClobber() ?
816 getNextSlot(getNextSlot(RedefIndex)) :
817 getNextSlot(RedefIndex), OldValNo));
820 errs() << " RESULT: ";
821 interval.print(errs(), tri_);
824 // Otherwise, this must be because of phi elimination. If this is the
825 // first redefinition of the vreg that we have seen, go back and change
826 // the live range in the PHI block to be a different value number.
827 if (interval.containsOneValue()) {
828 // Remove the old range that we now know has an incorrect number.
829 VNInfo *VNI = interval.getValNumInfo(0);
830 MachineInstr *Killer = vi.Kills[0];
831 phiJoinCopies.push_back(Killer);
832 LiveIndex Start = getMBBStartIdx(Killer->getParent());
834 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
836 errs() << " Removing [" << Start << "," << End << "] from: ";
837 interval.print(errs(), tri_);
840 interval.removeRange(Start, End);
841 assert(interval.ranges.size() == 1 &&
842 "Newly discovered PHI interval has >1 ranges.");
843 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
844 VNI->addKill(terminatorGaps[killMBB]);
845 VNI->setHasPHIKill(true);
847 errs() << " RESULT: ";
848 interval.print(errs(), tri_);
851 // Replace the interval with one of a NEW value number. Note that this
852 // value number isn't actually defined by an instruction, weird huh? :)
853 LiveRange LR(Start, End,
854 interval.getNextValue(LiveIndex(mbb->getNumber()),
855 0, false, VNInfoAllocator));
856 LR.valno->setIsPHIDef(true);
857 DEBUG(errs() << " replace range with " << LR);
858 interval.addRange(LR);
859 LR.valno->addKill(End);
861 errs() << " RESULT: ";
862 interval.print(errs(), tri_);
866 // In the case of PHI elimination, each variable definition is only
867 // live until the end of the block. We've already taken care of the
868 // rest of the live range.
869 LiveIndex defIndex = getDefIndex(MIIdx);
870 if (MO.isEarlyClobber())
871 defIndex = getUseIndex(MIIdx);
874 MachineInstr *CopyMI = NULL;
875 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
876 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
877 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
878 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
879 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
881 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
883 LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
884 LiveRange LR(defIndex, killIndex, ValNo);
885 interval.addRange(LR);
886 ValNo->addKill(terminatorGaps[mbb]);
887 ValNo->setHasPHIKill(true);
888 DEBUG(errs() << " +" << LR);
892 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Compute the (intra-block) live range created
// by a def of a physical register at MIIdx, scanning forward for the kill
// or redefinition, then add it to `interval`.
// NOTE(review): interior lines (parameter lines, else/goto-exit control
// flow, closing braces) are elided in this listing.
895 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
896 MachineBasicBlock::iterator mi,
899 LiveInterval &interval,
900 MachineInstr *CopyMI) {
901 // A physical register cannot be live across basic block, so its
902 // lifetime must end somewhere in its defining basic block.
904 errs() << "\t\tregister: ";
905 printRegName(interval.reg, tri_);
908 LiveIndex baseIndex = MIIdx;
909 LiveIndex start = getDefIndex(baseIndex);
910 // Earlyclobbers move back one.
911 if (MO.isEarlyClobber())
912 start = getUseIndex(MIIdx);
913 LiveIndex end = start;
915 // If it is not used after definition, it is considered dead at
916 // the instruction defining it. Hence its interval is:
917 // [defSlot(def), defSlot(def)+1)
918 // For earlyclobbers, the defSlot was pushed back one; the extra
919 // advance below compensates.
921 DEBUG(errs() << " dead");
922 if (MO.isEarlyClobber())
923 end = getNextSlot(getNextSlot(start));
925 end = getNextSlot(start);
929 // If it is not dead on definition, it must be killed by a
930 // subsequent instruction. Hence its interval is:
931 // [defSlot(def), useSlot(kill)+1)
932 baseIndex = getNextIndex(baseIndex);
933 while (++mi != MBB->end()) {
// Advance past index slots that map to no (erased) instruction.
934 while (baseIndex.getVecIndex() < i2miMap_.size() &&
935 getInstructionFromIndex(baseIndex) == 0)
936 baseIndex = getNextIndex(baseIndex);
937 if (mi->killsRegister(interval.reg, tri_)) {
938 DEBUG(errs() << " killed");
939 end = getNextSlot(getUseIndex(baseIndex));
942 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
944 if (mi->isRegTiedToUseOperand(DefIdx)) {
945 // Two-address instruction.
946 end = getDefIndex(baseIndex);
947 if (mi->getOperand(DefIdx).isEarlyClobber())
948 end = getUseIndex(baseIndex);
950 // Another instruction redefines the register before it is ever read.
951 // Then the register is essentially dead at the instruction that defines
952 // it. Hence its interval is:
953 // [defSlot(def), defSlot(def)+1)
954 DEBUG(errs() << " dead");
955 end = getNextSlot(start);
961 baseIndex = getNextIndex(baseIndex);
964 // The only case we should have a dead physreg here without a killing or
965 // instruction where we know it's dead is if it is live-in to the function
966 // and never used. Another possible case is the implicit use of the
967 // physical register has been deleted by two-address pass.
968 end = getNextSlot(start);
971 assert(start < end && "did not find end of interval?");
973 // Already exists? Extend old live interval.
974 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
975 bool Extend = OldLR != interval.end();
976 VNInfo *ValNo = Extend
977 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
978 if (MO.isEarlyClobber() && Extend)
979 ValNo->setHasRedefByEC(true);
980 LiveRange LR(start, end, ValNo);
981 interval.addRange(LR);
982 LR.valno->addKill(end);
983 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def: virtual registers go to
// handleVirtualRegisterDef; allocatable physical registers (and their
// not-explicitly-redefined sub-registers) go to handlePhysicalRegisterDef.
// NOTE(review): parameter lines between 987 and 991 are elided here.
986 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
987 MachineBasicBlock::iterator MI,
991 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
992 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
993 getOrCreateInterval(MO.getReg()));
994 else if (allocatableRegs_[MO.getReg()]) {
// Record the copy source (if any) so the value number carries it.
995 MachineInstr *CopyMI = NULL;
996 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
997 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
998 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
999 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
1000 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1002 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1003 getOrCreateInterval(MO.getReg()), CopyMI);
1004 // Def of a register also defines its sub-registers.
1005 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1006 // If MI also modifies the sub-register explicitly, avoid processing it
1007 // more than once. Do not pass in TRI here so it checks for exact match.
1008 if (!MI->modifiesRegister(*AS))
1009 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1010 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is live
// into MBB: scan forward for a kill or redefinition; if neither is found
// the register is either dead on entry or live through the whole block.
// NOTE(review): interior lines (a parameter line, exit-loop control flow,
// the live-through end computation, and the `VNInfo *vni =` prefix near
// line 1071) are elided in this listing.
1014 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1016 LiveInterval &interval, bool isAlias) {
1018 errs() << "\t\tlivein register: ";
1019 printRegName(interval.reg, tri_);
1022 // Look for kills, if it reaches a def before it's killed, then it shouldn't
1023 // be considered a livein.
1024 MachineBasicBlock::iterator mi = MBB->begin();
1025 LiveIndex baseIndex = MIIdx;
1026 LiveIndex start = baseIndex;
// Skip index slots with no instruction (erased or padding slots).
1027 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1028 getInstructionFromIndex(baseIndex) == 0)
1029 baseIndex = getNextIndex(baseIndex);
1030 LiveIndex end = baseIndex;
1031 bool SeenDefUse = false;
1033 while (mi != MBB->end()) {
1034 if (mi->killsRegister(interval.reg, tri_)) {
1035 DEBUG(errs() << " killed");
1036 end = getNextSlot(getUseIndex(baseIndex));
1039 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1040 // Another instruction redefines the register before it is ever read.
1041 // Then the register is essentially dead at the instruction that defines
1042 // it. Hence its interval is:
1043 // [defSlot(def), defSlot(def)+1)
1044 DEBUG(errs() << " dead");
1045 end = getNextSlot(getDefIndex(start));
1050 baseIndex = getNextIndex(baseIndex);
1052 if (mi != MBB->end()) {
1053 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1054 getInstructionFromIndex(baseIndex) == 0)
1055 baseIndex = getNextIndex(baseIndex);
1059 // Live-in register might not be used at all.
1062 DEBUG(errs() << " dead");
1063 end = getNextSlot(getDefIndex(MIIdx));
1065 DEBUG(errs() << " live through");
// The live-in value is defined "by the block" (a PHI-like def).
1071 interval.getNextValue(LiveIndex(MBB->getNumber()),
1072 0, false, VNInfoAllocator);
1073 vni->setIsPHIDef(true);
1074 LiveRange LR(start, end, vni);
1076 interval.addRange(LR);
1077 LR.valno->addKill(end);
1078 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Partition the copies defining SrcInt.reg into
// identity copies from DstInt.reg (IdentCopies) and all others
// (OtherCopies); coalescing is considered profitable when identity copies
// outnumber the others.
// NOTE(review): the return-type line and several statements (continues,
// braces, the conflict early-out) are elided in this listing.
1082 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1083 SmallVector<MachineInstr*,16> &IdentCopies,
1084 SmallVector<MachineInstr*,16> &OtherCopies) {
1085 bool HaveConflict = false;
1086 unsigned NumIdent = 0;
1087 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
1088 re = mri_->def_end(); ri != re; ++ri) {
1089 MachineInstr *MI = &*ri;
1090 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// Only plain move instructions are classified; other defs are skipped.
1091 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1093 if (SrcReg != DstInt.reg) {
1094 OtherCopies.push_back(MI);
// A non-identity copy conflicts if DstInt is already live at that point.
1095 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1097 IdentCopies.push_back(MI);
1103 return false; // Let coalescer handle it
1104 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - Optional pass (gated by -early-coalescing) that
// eliminates copies feeding phi joins when profitable: non-identity copies
// are redirected to target the phi destination, identity copies are erased,
// and finally the phi-join move itself is removed and the source interval
// merged into the destination.
// NOTE(review): this listing has elided lines (continues, braces, operand
// rewrites around L736); comments describe only the visible code.
1107 void LiveIntervals::performEarlyCoalescing() {
1108 if (!EarlyCoalescing)
1111 /// Perform early coalescing: eliminate copies which feed into phi joins
1112 /// and whose sources are defined by the phi joins.
1113 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1114 MachineInstr *Join = phiJoinCopies[i];
// Respect the debugging limit on how many joins to coalesce.
1115 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1118 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1119 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1121 assert(isMove && "PHI join instruction must be a move!");
1126 LiveInterval &DstInt = getInterval(PHIDst);
1127 LiveInterval &SrcInt = getInterval(PHISrc);
1128 SmallVector<MachineInstr*, 16> IdentCopies;
1129 SmallVector<MachineInstr*, 16> OtherCopies;
1130 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1133 DEBUG(errs() << "PHI Join: " << *Join);
1134 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1135 VNInfo *VNI = DstInt.getValNumInfo(0);
1137 // Change the non-identity copies to directly target the phi destination.
1138 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1139 MachineInstr *PHICopy = OtherCopies[i];
1140 DEBUG(errs() << "Moving: " << *PHICopy);
1142 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1143 LiveIndex DefIndex = getDefIndex(MIIndex);
1144 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1145 LiveIndex StartIndex = SLR->start;
1146 LiveIndex EndIndex = SLR->end;
1148 // Delete val# defined by the now identity copy and add the range from
1149 // beginning of the mbb to the end of the range.
1150 SrcInt.removeValNo(SLR->valno);
1151 DEBUG(errs() << " added range [" << StartIndex << ','
1152 << EndIndex << "] to reg" << DstInt.reg << '\n');
// Clear any overlapping destination range before adding the new one.
1153 if (DstInt.liveAt(StartIndex))
1154 DstInt.removeRange(StartIndex, EndIndex);
1155 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1157 NewVNI->setHasPHIKill(true);
1158 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's operands that referenced PHISrc (rewrite itself is
// elided from this listing).
1159 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1160 MachineOperand &MO = PHICopy->getOperand(j);
1161 if (!MO.isReg() || MO.getReg() != PHISrc)
1167 // Now let's eliminate all the would-be identity copies.
1168 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1169 MachineInstr *PHICopy = IdentCopies[i];
1170 DEBUG(errs() << "Coalescing: " << *PHICopy);
1172 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1173 LiveIndex DefIndex = getDefIndex(MIIndex);
1174 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1175 LiveIndex StartIndex = SLR->start;
1176 LiveIndex EndIndex = SLR->end;
1178 // Delete val# defined by the now identity copy and add the range from
1179 // beginning of the mbb to the end of the range.
1180 SrcInt.removeValNo(SLR->valno);
1181 RemoveMachineInstrFromMaps(PHICopy);
1182 PHICopy->eraseFromParent();
1183 DEBUG(errs() << " added range [" << StartIndex << ','
1184 << EndIndex << "] to reg" << DstInt.reg << '\n');
// Identity copies reuse the destination's single value number.
1185 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1188 // Remove the phi join and update the phi block liveness.
1189 LiveIndex MIIndex = getInstructionIndex(Join);
1190 LiveIndex UseIndex = getUseIndex(MIIndex);
1191 LiveIndex DefIndex = getDefIndex(MIIndex);
1192 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1193 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The destination value is no longer defined by a copy instruction.
1194 DLR->valno->setCopy(0);
1195 DLR->valno->setIsDefAccurate(false);
1196 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1197 SrcInt.removeRange(SLR->start, SLR->end);
1198 assert(SrcInt.empty());
1199 removeInterval(PHISrc);
1200 RemoveMachineInstrFromMaps(Join);
1201 Join->eraseFromParent();
1207 /// computeIntervals - computes the live intervals for virtual
1208 /// registers. for some ordering of the machine instructions [1,N] a
1209 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1210 /// which a variable is live
// Walks every block, first creating intervals for live-in registers, then
// visiting each instruction's operands: defs build intervals via
// handleRegisterDef, undef uses are collected for empty-interval creation.
// NOTE(review): some lines (loop braces, slot-advance code near L806) are
// elided in this listing.
1211 void LiveIntervals::computeIntervals() {
1212 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1213 << "********** Function: "
1214 << ((Value*)mf_->getFunction())->getName() << '\n');
1216 SmallVector<unsigned, 8> UndefUses;
1217 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1218 MBBI != E; ++MBBI) {
1219 MachineBasicBlock *MBB = MBBI;
1220 // Track the index of the current machine instr.
1221 LiveIndex MIIndex = getMBBStartIdx(MBB);
1222 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1224 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1226 // Create intervals for live-ins to this BB first.
1227 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1228 LE = MBB->livein_end(); LI != LE; ++LI) {
1229 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1230 // Multiple live-ins can alias the same register.
1231 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1232 if (!hasInterval(*AS))
1233 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1237 // Skip over empty initial indices.
1238 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1239 getInstructionFromIndex(MIIndex) == 0)
1240 MIIndex = getNextIndex(MIIndex);
1242 for (; MI != miEnd; ++MI) {
1243 DEBUG(errs() << MIIndex << "\t" << *MI);
// Operands are scanned in reverse order; presumably so that operand
// removal/rewriting during def handling does not skip entries -- verify.
1246 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1247 MachineOperand &MO = MI->getOperand(i);
1248 if (!MO.isReg() || !MO.getReg())
1251 // handle register defs - build intervals
1253 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1254 else if (MO.isUndef())
1255 UndefUses.push_back(MO.getReg());
1258 // Skip over the empty slots after each instruction.
1259 unsigned Slots = MI->getDesc().getNumDefs();
1264 MIIndex = getNextIndex(MIIndex);
1266 // Skip over empty indices.
1267 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1268 getInstructionFromIndex(MIIndex) == 0)
1269 MIIndex = getNextIndex(MIIndex);
1273 // Create empty intervals for registers defined by implicit_def's (except
1274 // for those implicit_def that define values which are liveout of their
1276 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1277 unsigned UndefReg = UndefUses[i];
1278 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Append to MBBs every basic block whose start index falls
// in [Start, End). Idx2MBBMap is kept sorted, so lower_bound locates the
// first candidate. Returns true if any block was found.
// NOTE(review): the loop increment / ResVal update / return are elided in
// this listing.
1282 bool LiveIntervals::findLiveInMBBs(
1283 LiveIndex Start, LiveIndex End,
1284 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1285 std::vector<IdxMBBPair>::const_iterator I =
1286 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1288 bool ResVal = false;
1289 while (I != Idx2MBBMap.end()) {
1290 if (I->first >= End)
1292 MBBs.push_back(I->second);
// findReachableMBBs - Like findLiveInMBBs, but for each block in the index
// range appends that block's successors (blocks reachable from the range)
// to MBBs. Returns true if any were found.
// NOTE(review): loop increment / ResVal update / return are elided here.
1299 bool LiveIntervals::findReachableMBBs(
1300 LiveIndex Start, LiveIndex End,
1301 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1302 std::vector<IdxMBBPair>::const_iterator I =
1303 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1305 bool ResVal = false;
1306 while (I != Idx2MBBMap.end()) {
1309 MachineBasicBlock *MBB = I->second;
// Stop once a block extends past the end of the requested range.
1310 if (getMBBEndIdx(MBB) > End)
1312 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1313 SE = MBB->succ_end(); SI != SE; ++SI)
1314 MBBs.push_back(*SI);
// createInterval - Allocate a fresh LiveInterval for reg. Physical
// registers get HUGE_VALF spill weight (presumably to make them
// effectively unspillable -- confirm against allocator usage).
1321 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1322 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1323 return new LiveInterval(reg, Weight);
1326 /// dupInterval - Duplicate a live interval. The caller is responsible for
1327 /// managing the allocated memory.
// Copies li's ranges and value numbers into a newly created interval using
// this pass's VNInfo allocator. (The return statement is elided in this
// listing.)
1328 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1329 LiveInterval *NewLI = createInterval(li->reg);
1330 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1334 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1335 /// copy field and returns the source register that defines it.
// NOTE(review): the return statements for the extract-subreg, move, and
// no-copy cases are elided in this listing.
1336 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1337 if (!VNI->getCopy())
1340 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1341 // If it's extracting out of a physical register, return the sub-register.
1342 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1343 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1344 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1346 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1347 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// For insert_subreg / subreg_to_reg the inserted value is operand 2.
1348 return VNI->getCopy()->getOperand(2).getReg();
1350 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1351 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1353 llvm_unreachable("Unrecognized copy instruction!");
1357 //===----------------------------------------------------------------------===//
1358 // Register allocator hooks.
1361 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1362 /// allow one) virtual register operand, then its uses are implicitly using
1363 /// the register. Returns the virtual register.
// NOTE(review): the RegOp declaration/initialization, several 'continue's,
// and the final return are elided in this listing.
1364 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1365 MachineInstr *MI) const {
1367 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1368 MachineOperand &MO = MI->getOperand(i);
1369 if (!MO.isReg() || !MO.isUse())
// The interval's own register doesn't count as an extra operand.
1371 unsigned Reg = MO.getReg();
1372 if (Reg == 0 || Reg == li.reg)
// Reserved (non-allocatable) physical register uses are ignored.
1375 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1376 !allocatableRegs_[Reg])
1378 // FIXME: For now, only remat MI with at most one register operand.
1380 "Can't rematerialize instruction with multiple register operand!");
1381 RegOp = MO.getReg();
1389 /// isValNoAvailableAt - Return true if the val# of the specified interval
1390 /// which reaches the given instruction also reaches the specified use index.
1391 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1392 LiveIndex UseIdx) const {
1393 LiveIndex Index = getInstructionIndex(MI);
// Value number live at MI's own index...
1394 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...must also be the one live at UseIdx.
1395 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1396 return UI != li.end() && UI->valno == ValNo;
1399 /// isReMaterializable - Returns true if the definition MI of the specified
1400 /// val# of the specified interval is re-materializable.
// Checks target triviality, then (if MI has an implicit virtual-register
// use) verifies that value is available at every use of li.reg, and that
// the implicitly-used register is not itself about to be spilled.
// NOTE(review): early returns / continues and the final return are elided
// in this listing.
1401 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1402 const VNInfo *ValNo, MachineInstr *MI,
1403 SmallVectorImpl<LiveInterval*> &SpillIs,
1408 if (!tii_->isTriviallyReMaterializable(MI, aa_))
1411 // Target-specific code can mark an instruction as being rematerializable
1412 // if it has one virtual reg use, though it had better be something like
1413 // a PIC base register which is likely to be live everywhere.
1414 unsigned ImpUse = getReMatImplicitUse(li, MI);
1416 const LiveInterval &ImpLi = getInterval(ImpUse);
1417 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1418 re = mri_->use_end(); ri != re; ++ri) {
1419 MachineInstr *UseMI = &*ri;
1420 LiveIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular val# matter.
1421 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1423 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1427 // If a register operand of the re-materialized instruction is going to
1428 // be spilled next, then it's not legal to re-materialize this instruction.
1429 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1430 if (ImpUse == SpillIs[i]->reg)
1436 /// isReMaterializable - Returns true if the definition MI of the specified
1437 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with dummy SpillIs /
// isLoad out-arguments.
1438 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1439 const VNInfo *ValNo, MachineInstr *MI) {
1440 SmallVector<LiveInterval*, 4> Dummy1;
1442 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1445 /// isReMaterializable - Returns true if every definition of MI of every
1446 /// val# of the specified interval is re-materializable.
// Accumulates into isLoad whether any rematerializable def is a load.
// NOTE(review): failure returns and the final 'return true' are elided in
// this listing.
1447 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1448 SmallVectorImpl<LiveInterval*> &SpillIs,
1451 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1453 const VNInfo *VNI = *i;
1454 if (VNI->isUnused())
1455 continue; // Dead val#.
1456 // Is the def for the val# rematerializable?
// Without an accurate def location we cannot find the defining instr.
1457 if (!VNI->isDefAccurate())
1459 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1460 bool DefIsLoad = false;
1462 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1464 isLoad |= DefIsLoad;
1469 /// FilterFoldedOps - Filter out two-address use operands. Return
1470 /// true if it finds any issue with the operands that ought to prevent
// folding (continuation of the doc comment is elided in this listing).
// MRInfo accumulates VirtRegMap mod/ref flags for the surviving operands;
// FoldOps receives the operand indices that may be folded.
1472 static bool FilterFoldedOps(MachineInstr *MI,
1473 SmallVector<unsigned, 2> &Ops,
1475 SmallVector<unsigned, 2> &FoldOps) {
1477 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1478 unsigned OpIdx = Ops[i];
1479 MachineOperand &MO = MI->getOperand(OpIdx);
1480 // FIXME: fold subreg use.
1484 MRInfo |= (unsigned)VirtRegMap::isMod;
1486 // Filter out two-address use operand(s).
1487 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use is both modified and referenced; it is not added to FoldOps.
1488 MRInfo = VirtRegMap::isModRef;
1491 MRInfo |= (unsigned)VirtRegMap::isRef;
1493 FoldOps.push_back(OpIdx);
1499 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1500 /// slot / to reg or any rematerialized load into ith operand of specified
1501 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true (tail of the doc comment is elided in this listing).
1503 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1504 VirtRegMap &vrm, MachineInstr *DefMI,
1506 SmallVector<unsigned, 2> &Ops,
1507 bool isSS, int Slot, unsigned Reg) {
1508 // If it is an implicit def instruction, just delete it.
1509 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1510 RemoveMachineInstrFromMaps(MI);
1511 vrm.RemoveMachineInstrFromMaps(MI);
1512 MI->eraseFromParent();
1517 // Filter the list of operand indexes that are to be folded. Abort if
1518 // any operand will prevent folding.
1519 unsigned MRInfo = 0;
1520 SmallVector<unsigned, 2> FoldOps;
1521 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1524 // The only time it's safe to fold into a two address instruction is when
1525 // it's folding reload and spill from / into a spill stack slot.
1526 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Either fold a stack-slot access (isSS) or the remat def instruction.
1529 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1530 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1532 // Remember this instruction uses the spill slot.
1533 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1535 // Attempt to fold the memory reference into the instruction. If
1536 // we can do this, we don't need to insert spill code.
1537 MachineBasicBlock &MBB = *MI->getParent();
1538 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1539 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1540 vrm.transferSpillPts(MI, fmi);
1541 vrm.transferRestorePts(MI, fmi);
1542 vrm.transferEmergencySpills(MI, fmi);
// Replace MI with the folded instruction in the index maps and the block.
1544 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1545 mi2iMap_[fmi] = InstrIdx;
1546 MI = MBB.insert(MBB.erase(MI), fmi);
1553 /// canFoldMemoryOperand - Returns true if the specified load / store
1554 /// folding is possible.
// Dry-run version of tryFoldMemoryOperand: filters operands and asks the
// target whether folding would succeed, without modifying anything.
1555 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1556 SmallVector<unsigned, 2> &Ops,
1558 // Filter the list of operand indexes that are to be folded. Abort if
1559 // any operand will prevent folding.
1560 unsigned MRInfo = 0;
1561 SmallVector<unsigned, 2> FoldOps;
1562 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1565 // It's only legal to remat for a use, not a def.
1566 if (ReMat && (MRInfo & VirtRegMap::isMod))
1569 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all ranges of li lie within a single
// basic block (used to decide whether splitting is worthwhile).
// NOTE(review): the returns inside the loop and the final return are
// elided in this listing.
1572 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1573 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1574 for (LiveInterval::Ranges::const_iterator
1575 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the block containing this range's start index.
1576 std::vector<IdxMBBPair>::const_iterator II =
1577 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1578 if (II == Idx2MBBMap.end())
1580 if (I->end > II->first) // crossing a MBB.
1582 MBBs.insert(II->second);
1583 if (MBBs.size() > 1)
1589 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1590 /// interval on to-be re-materialized operands of MI) with new register.
1591 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1592 MachineInstr *MI, unsigned NewVReg,
1594 // There is an implicit use. That means one of the other operand is
1595 // being remat'ed and the remat'ed instruction has li.reg as an
1596 // use operand. Make sure we rewrite that as well.
1597 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1598 MachineOperand &MO = MI->getOperand(i);
1601 unsigned Reg = MO.getReg();
1602 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1604 if (!vrm.isReMaterialized(Reg))
// Patch li.reg's use inside the stored remat instruction to NewVReg.
1606 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1607 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1609 UseMO->setReg(NewVReg);
1613 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1614 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// For one instruction using li.reg: possibly erase a fully-rematerialized
// def, try folding the load/store into the instruction, otherwise create a
// new virtual register (spill interval), rewrite operands to it, record
// spill-slot / remat-id bookkeeping in the VirtRegMap, and add the new
// live ranges. Returns whether all rewritten operands could be folded.
// NOTE(review): many lines ('continue's, braces, some conditions) are
// elided in this listing; comments describe only the visible code.
1615 bool LiveIntervals::
1616 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1617 bool TrySplit, LiveIndex index, LiveIndex end,
1619 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1620 unsigned Slot, int LdSlot,
1621 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1623 const TargetRegisterClass* rc,
1624 SmallVector<int, 4> &ReMatIds,
1625 const MachineLoopInfo *loopInfo,
1626 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1627 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1628 std::vector<LiveInterval*> &NewLIs) {
1629 bool CanFold = false;
1631 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1632 MachineOperand& mop = MI->getOperand(i);
1635 unsigned Reg = mop.getReg();
1636 unsigned RegI = Reg;
1637 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1642 bool TryFold = !DefIsReMat;
1643 bool FoldSS = true; // Default behavior unless it's a remat.
1644 int FoldSlot = Slot;
1646 // If this is the rematerializable definition MI itself and
1647 // all of its uses are rematerialized, simply delete it.
1648 if (MI == ReMatOrigDefMI && CanDelete) {
1649 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1651 RemoveMachineInstrFromMaps(MI);
1652 vrm.RemoveMachineInstrFromMaps(MI);
1653 MI->eraseFromParent();
1657 // If def for this use can't be rematerialized, then try folding.
1658 // If def is rematerializable and it's a load, also try folding.
1659 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1661 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1667 // Scan all of the operands of this instruction rewriting operands
1668 // to use NewVReg instead of li.reg as appropriate. We do this for
1671 // 1. If the instr reads the same spilled vreg multiple times, we
1672 // want to reuse the NewVReg.
1673 // 2. If the instr is a two-addr instruction, we are required to
1674 // keep the src/dst regs pinned.
1676 // Keep track of whether we replace a use and/or def so that we can
1677 // create the spill interval with the appropriate range.
1679 HasUse = mop.isUse();
1680 HasDef = mop.isDef();
1681 SmallVector<unsigned, 2> Ops;
// Collect every other operand of MI that also references li.reg so they
// can all be rewritten to the same NewVReg.
1683 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1684 const MachineOperand &MOj = MI->getOperand(j);
1687 unsigned RegJ = MOj.getReg();
1688 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1692 if (!MOj.isUndef()) {
1693 HasUse |= MOj.isUse();
1694 HasDef |= MOj.isDef();
1699 // Create a new virtual register for the spill interval.
1700 // Create the new register now so we can map the fold instruction
1701 // to the new register so when it is unfolded we get the correct
1703 bool CreatedNewVReg = false;
1705 NewVReg = mri_->createVirtualRegister(rc);
1707 CreatedNewVReg = true;
1713 // Do not fold load / store here if we are splitting. We'll find an
1714 // optimal point to insert a load / store later.
1716 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1717 Ops, FoldSS, FoldSlot, NewVReg)) {
1718 // Folding the load/store can completely change the instruction in
1719 // unpredictable ways, rescan it from the beginning.
1722 // We need to give the new vreg the same stack slot as the
1723 // spilled interval.
1724 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If the folded instruction vanished from the maps, rescanning is done
// via the (elided) RestartInstruction label below.
1730 if (isNotInMIMap(MI))
1732 goto RestartInstruction;
1735 // We'll try to fold it later if it's profitable.
1736 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1740 mop.setReg(NewVReg);
1741 if (mop.isImplicit())
1742 rewriteImplicitOps(li, MI, NewVReg, vrm);
1744 // Reuse NewVReg for other reads.
1745 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1746 MachineOperand &mopj = MI->getOperand(Ops[j]);
1747 mopj.setReg(NewVReg);
1748 if (mopj.isImplicit())
1749 rewriteImplicitOps(li, MI, NewVReg, vrm);
1752 if (CreatedNewVReg) {
1754 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1755 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1756 // Each valnum may have its own remat id.
1757 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1759 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1761 if (!CanDelete || (HasUse && HasDef)) {
1762 // If this is a two-addr instruction then its use operands are
1763 // rematerializable but its def is not. It should be assigned a
1765 vrm.assignVirt2StackSlot(NewVReg, Slot);
1768 vrm.assignVirt2StackSlot(NewVReg, Slot);
1770 } else if (HasUse && HasDef &&
1771 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1772 // If this interval hasn't been assigned a stack slot (because earlier
1773 // def is a deleted remat def), do it now.
1774 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1775 vrm.assignVirt2StackSlot(NewVReg, Slot);
1778 // Re-matting an instruction with virtual register use. Add the
1779 // register as an implicit use on the use MI.
1780 if (DefIsReMat && ImpUse)
1781 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1783 // Create a new register interval for this spill / remat.
1784 LiveInterval &nI = getOrCreateInterval(NewVReg);
1785 if (CreatedNewVReg) {
1786 NewLIs.push_back(&nI);
1787 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1789 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add a use range (load slot to just past the use) for the new interval.
1793 if (CreatedNewVReg) {
1794 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1795 nI.getNextValue(LiveIndex(), 0, false,
1797 DEBUG(errs() << " +" << LR);
1800 // Extend the split live interval to this def / use.
1801 LiveIndex End = getNextSlot(getUseIndex(index));
1802 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1803 nI.getValNumInfo(nI.getNumValNums()-1));
1804 DEBUG(errs() << " +" << LR);
// Add a def range (def slot to store slot) when the operand defines reg.
1809 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1810 nI.getNextValue(LiveIndex(), 0, false,
1812 DEBUG(errs() << " +" << LR);
1817 errs() << "\t\t\t\tAdded new interval: ";
1818 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if VNI has a (non-PHI) kill located
// inside MBB strictly after Idx and before the block's end index.
// NOTE(review): the 'continue' after the PHI check and the returns are
// elided in this listing.
1824 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1826 MachineBasicBlock *MBB,
1827 LiveIndex Idx) const {
1828 LiveIndex End = getMBBEndIdx(MBB);
1829 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1830 if (VNI->kills[j].isPHIIndex())
1833 LiveIndex KillIdx = VNI->kills[j];
1834 if (KillIdx > Idx && KillIdx < End)
1840 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1841 /// during spilling.
// Record for one (instruction, use/def) occurrence of the spilled reg;
// the Index/MI/HasUse/HasDef member declarations are elided in this
// listing.
1843 struct RewriteInfo {
1848 RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
1849 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Orders RewriteInfo records by instruction index for std::sort.
1852 struct RewriteInfoCompare {
1853 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1854 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Drive rewriteInstructionForSpills over all
// def/use occurrences of li.reg inside the range I: collect and sort them
// by instruction index, rewrite each, and record per-MBB spill points
// (SpillIdxes/SpillMBBs) and restore points (RestoreIdxes/RestoreMBBs)
// together with whether each spill/restore may later be folded.
// NOTE(review): many lines (braces, 'continue's, some conditions) are
// elided in this listing; comments describe only the visible code.
1859 void LiveIntervals::
1860 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1861 LiveInterval::Ranges::const_iterator &I,
1862 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1863 unsigned Slot, int LdSlot,
1864 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1866 const TargetRegisterClass* rc,
1867 SmallVector<int, 4> &ReMatIds,
1868 const MachineLoopInfo *loopInfo,
1869 BitVector &SpillMBBs,
1870 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1871 BitVector &RestoreMBBs,
1872 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1873 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1874 std::vector<LiveInterval*> &NewLIs) {
1875 bool AllCanFold = true;
1876 unsigned NewVReg = 0;
1877 LiveIndex start = getBaseIndex(I->start);
1878 LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1880 // First collect all the def / use in this live range that will be rewritten.
1881 // Make sure they are sorted according to instruction index.
1882 std::vector<RewriteInfo> RewriteMIs;
1883 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1884 re = mri_->reg_end(); ri != re; ) {
1885 MachineInstr *MI = &*ri;
1886 MachineOperand &O = ri.getOperand();
1888 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1889 LiveIndex index = getInstructionIndex(MI);
1890 if (index < start || index >= end)
1894 // Must be defined by an implicit def. It should not be spilled. Note,
1895 // this is for correctness reason. e.g.
1896 // 8 %reg1024<def> = IMPLICIT_DEF
1897 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1898 // The live range [12, 14) are not part of the r1024 live interval since
1899 // it's defined by an implicit def. It will not conflicts with live
1900 // interval of r1025. Now suppose both registers are spilled, you can
1901 // easily see a situation where both registers are reloaded before
1902 // the INSERT_SUBREG and both target registers that would overlap.
1904 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1906 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1908 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1909 // Now rewrite the defs and uses.
1910 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1911 RewriteInfo &rwi = RewriteMIs[i];
1913 LiveIndex index = rwi.Index;
1914 bool MIHasUse = rwi.HasUse;
1915 bool MIHasDef = rwi.HasDef;
1916 MachineInstr *MI = rwi.MI;
1917 // If MI def and/or use the same register multiple times, then there
1918 // are multiple entries.
1919 unsigned NumUses = MIHasUse;
// Merge all entries belonging to the same instruction.
1920 while (i != e && RewriteMIs[i].MI == MI) {
1921 assert(RewriteMIs[i].Index == index);
1922 bool isUse = RewriteMIs[i].HasUse;
1923 if (isUse) ++NumUses;
1925 MIHasDef |= RewriteMIs[i].HasDef;
1928 MachineBasicBlock *MBB = MI->getParent();
1930 if (ImpUse && MI != ReMatDefMI) {
1931 // Re-matting an instruction with virtual register use. Update the
1932 // register interval's spill weight to HUGE_VALF to prevent it from
1934 LiveInterval &ImpLi = getInterval(ImpUse);
1935 ImpLi.weight = HUGE_VALF;
1938 unsigned MBBId = MBB->getNumber();
1939 unsigned ThisVReg = 0;
// When splitting, reuse the vreg already created for this MBB, if any.
1941 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1942 if (NVI != MBBVRegsMap.end()) {
1943 ThisVReg = NVI->second;
1950 // It's better to start a new interval to avoid artifically
1951 // extend the new interval.
1952 if (MIHasDef && !MIHasUse) {
1953 MBBVRegsMap.erase(MBB->getNumber());
1959 bool IsNew = ThisVReg == 0;
1961 // This ends the previous live interval. If all of its def / use
1962 // can be folded, give it a low spill weight.
1963 if (NewVReg && TrySplit && AllCanFold) {
1964 LiveInterval &nI = getOrCreateInterval(NewVReg);
1971 bool HasDef = false;
1972 bool HasUse = false;
1973 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1974 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1975 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1976 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1977 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1978 if (!HasDef && !HasUse)
1981 AllCanFold &= CanFold;
1983 // Update weight of spill interval.
1984 LiveInterval &nI = getOrCreateInterval(NewVReg);
1986 // The spill weight is now infinity as it cannot be spilled again.
1987 nI.weight = HUGE_VALF;
1991 // Keep track of the last def and first use in each MBB.
1993 if (MI != ReMatOrigDefMI || !CanDelete) {
1994 bool HasKill = false;
1996 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1998 // If this is a two-address code, then this index starts a new VNInfo.
1999 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2001 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2003 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2004 SpillIdxes.find(MBBId);
2006 if (SII == SpillIdxes.end()) {
2007 std::vector<SRInfo> S;
2008 S.push_back(SRInfo(index, NewVReg, true));
2009 SpillIdxes.insert(std::make_pair(MBBId, S));
2010 } else if (SII->second.back().vreg != NewVReg) {
2011 SII->second.push_back(SRInfo(index, NewVReg, true));
2012 } else if (index > SII->second.back().index) {
2013 // If there is an earlier def and this is a two-address
2014 // instruction, then it's not possible to fold the store (which
2015 // would also fold the load).
2016 SRInfo &Info = SII->second.back();
2018 Info.canFold = !HasUse;
2020 SpillMBBs.set(MBBId);
2021 } else if (SII != SpillIdxes.end() &&
2022 SII->second.back().vreg == NewVReg &&
2023 index > SII->second.back().index) {
2024 // There is an earlier def that's not killed (must be two-address).
2025 // The spill is no longer needed.
2026 SII->second.pop_back();
2027 if (SII->second.empty()) {
2028 SpillIdxes.erase(MBBId);
2029 SpillMBBs.reset(MBBId);
// Use handling: record restore points and invalidate unsafe folds.
2036 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2037 SpillIdxes.find(MBBId);
2038 if (SII != SpillIdxes.end() &&
2039 SII->second.back().vreg == NewVReg &&
2040 index > SII->second.back().index)
2041 // Use(s) following the last def, it's not safe to fold the spill.
2042 SII->second.back().canFold = false;
2043 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2044 RestoreIdxes.find(MBBId);
2045 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2046 // If we are splitting live intervals, only fold if it's the first
2047 // use and there isn't another use later in the MBB.
2048 RII->second.back().canFold = false;
2050 // Only need a reload if there isn't an earlier def / use.
2051 if (RII == RestoreIdxes.end()) {
2052 std::vector<SRInfo> Infos;
2053 Infos.push_back(SRInfo(index, NewVReg, true));
2054 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2056 RII->second.push_back(SRInfo(index, NewVReg, true));
2058 RestoreMBBs.set(MBBId);
2062 // Update spill weight.
2063 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2064 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2067 if (NewVReg && TrySplit && AllCanFold) {
2068 // If all of its def / use can be folded, give it a low spill weight.
2069 LiveInterval &nI = getOrCreateInterval(NewVReg);
2074 bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
2075 unsigned vr, BitVector &RestoreMBBs,
2076 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2077 if (!RestoreMBBs[Id])
2079 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2080 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2081 if (Restores[i].index == index &&
2082 Restores[i].vreg == vr &&
2083 Restores[i].canFold)
2088 void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
2089 unsigned vr, BitVector &RestoreMBBs,
2090 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2091 if (!RestoreMBBs[Id])
2093 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2094 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2095 if (Restores[i].index == index && Restores[i].vreg)
2096 Restores[i].index = LiveIndex();
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
///
/// \param li     the interval being spilled; remaining operands of li.reg
///               are found via the MachineRegisterInfo use/def chain.
/// \param vrm    virtual register map; map entries for erased IMPLICIT_DEFs
///               are dropped, and new vregs are flagged implicitly defined.
/// \param rc     register class for any newly created virtual registers.
/// \param NewLIs receives the (empty) intervals created for leftover uses.
///
/// NOTE(review): several lines of this function (the return type line, the
/// O.isDef() branch separating the erase path from the new-vreg path, the
/// iterator advance, and closing braces) are not visible in this chunk of
/// the file -- confirm the exact control flow against the full source.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk every remaining use/def of li.reg.  The iterator is advanced
  // manually because instructions may be erased during the walk.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Any def of li.reg still present at this point must be an IMPLICIT_DEF;
    // real defs were rewritten by the spilling code before this is called.
    assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
           "Register def was not rewritten?");
    // Remove the instruction from both index maps before deleting it.
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be an use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    // Rewrite this instruction's operands to refer to the new vreg.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg) {
/// addIntervalsForSpillsFast - Simple spiller used when -fast-spill is on:
/// every instruction touching li.reg either has the stack slot folded
/// directly into it, or gets a brand-new virtual register (mapped to the
/// same shared slot) with a tiny interval covering just that instruction.
/// Returns the list of new intervals.
///
/// NOTE(review): a number of lines (the VirtRegMap parameter, DEBUG guards,
/// the HasUse/HasDef branches around the two LiveRange constructions, loop
/// closing braces, and the final `return added;`) are not visible in this
/// chunk of the file -- confirm against the full source.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
  // All replacement vregs share one stack slot for the original interval.
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

    errs() << "\t\t\t\tadding intervals for spills for interval: ";

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect the indices of every operand of MI that refers to li.reg,
    // noting whether the instruction reads and/or writes the register.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // First preference: fold the stack slot into MI as a memory operand.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        // A use of the fresh vreg is its only use: mark it as a kill.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      // NOTE(review): the HasUse / HasDef guards around the two LiveRange
      // blocks below are elided here; as printed the second `LR` would
      // conflict with the first.
      LiveIndex index = getInstructionIndex(MI);
        // Reload needed before the use: interval covers load->use slots.
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(LiveIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        vrm.addRestorePoint(NewVReg, MI);
        // Store needed after the def: interval covers def->store slots.
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(LiveIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        vrm.addSpillPoint(NewVReg, true, MI);

      added.push_back(&nI);

        errs() << "\t\t\t\tadded new interval: ";

    // Operands were rewritten, invalidating RI: restart the use/def scan.
    RI = mri_->reg_begin(li.reg);
/// addIntervalsForSpills - The main spiller entry point.  Spills interval
/// \p li: decides on rematerialization per value number, assigns one shared
/// stack slot, rewrites all defs/uses into small per-instruction intervals,
/// folds stores/reloads into instructions where possible, records
/// spill/restore points in the VirtRegMap, and returns the new intervals.
///
/// NOTE(review): many short lines (loop increments, `else` keywords,
/// `continue;` bodies, closing braces, DEBUG guards, and declarations such
/// as `LdSlot`, `dummy` and `CloneMIs`, plus the final `return RetNewLIs;`)
/// are not visible in this chunk of the file; comments below document only
/// what is shown -- confirm details against the full source.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Optional fast path selected by the -fast-spill flag.
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

    errs() << "\t\t\t\tadding intervals for spills for interval: ";
    li.print(errs(), tri_);

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization bookkeeping.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    LiveIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != LiveIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);

    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      // Note ReMatOrigDefMI has already been deleted.
      rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                   Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      // Non-first ranges are rewritten without rematerialization info.
      rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                   Slot, 0, false, false, false,
                                   false, vrm, rc, ReMatIds, loopInfo,
                                   SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                   MBBVRegsMap, NewLIs);
      IsFirstRange = false;

    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  // Non-split path: decide per value number whether its def can be
  // rematerialized (and possibly deleted), and whether a slot is needed.
  bool TrySplit = !intervalIsInOneMBB(li);
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);

  // Insert spills / restores if we are splitting.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every MBB that needs a spill: either fold the store into the
    // defining instruction or record a spill point for the spiller.
    int Id = SpillMBBs.find_first();
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        LiveIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          // Scan MI's operands for VReg to decide foldability.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
            (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                            RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            // Also folded uses, do not issue a load.
            eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
            nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
        // Otherwise tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);

  // Walk every MBB that needs a restore: either fold the load (or the
  // rematerialized def) into the user or record a restore point.
  int Id = RestoreMBBs.find_first();
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      LiveIndex index = restores[i].index;
      // Entries invalidated by eraseRestoreInfo carry LiveIndex().
      if (index == LiveIndex())
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      if (restores[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            // If this restore were to be folded, it would have been folded
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
        nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
        vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);

  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      // Normalize the spill weight by the interval's approximate size.
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Mark the last use of this new interval as a kill and record it
        // in the VirtRegMap (unless the use is tied to a def).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        LiveIndex LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2549 /// hasAllocatableSuperReg - Return true if the specified physical register has
2550 /// any super register that's allocatable.
2551 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2552 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2553 if (allocatableRegs_[*AS] && hasInterval(*AS))
2558 /// getRepresentativeReg - Find the largest super register of the specified
2559 /// physical register.
2560 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2561 // Find the largest super-register that is allocatable.
2562 unsigned BestReg = Reg;
2563 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2564 unsigned SuperReg = *AS;
2565 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2573 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2574 /// specified interval that conflicts with the specified physical register.
2575 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2576 unsigned PhysReg) const {
2577 unsigned NumConflicts = 0;
2578 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2579 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2580 E = mri_->reg_end(); I != E; ++I) {
2581 MachineOperand &O = I.getOperand();
2582 MachineInstr *MI = O.getParent();
2583 LiveIndex Index = getInstructionIndex(MI);
2584 if (pli.liveAt(Index))
2587 return NumConflicts;
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
///
/// NOTE(review): several short statements (the `continue;` bodies of the
/// guard `if`s, the declaration of `msg`, the `else` separating the
/// removeRange success path from the error path, closing braces, and the
/// return statements) are not visible in this chunk of the file -- confirm
/// the exact control flow against the full source.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest) super-register so one emergency
  // spill covers PhysReg and its aliases.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  // Collect SpillReg plus every sub-register of it that has an interval.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  SmallSet<unsigned, 4> Added;
  for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
    if (Added.insert(*AS) && hasInterval(*AS)) {
      PRegs.push_back(*AS);
      for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)

  // Visit each instruction using/defining li.reg exactly once.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
    LiveIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
      // Ask the spiller to spill/restore PReg around this instruction.
      vrm.addEmergencySpill(PReg, MI);
      LiveIndex StartIdx = getLoadIndex(Index);
      LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        // NOTE(review): the error path below presumably belongs to an else
        // branch (interval cannot be cut) elided from this chunk.
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        llvm_report_error(Msg.str());
      // Also trim any live sub-register intervals around this instruction.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2661 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2662 MachineInstr* startInst) {
2663 LiveInterval& Interval = getOrCreateInterval(reg);
2664 VNInfo* VN = Interval.getNextValue(
2665 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2666 startInst, true, getVNInfoAllocator());
2667 VN->setHasPHIKill(true);
2668 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2670 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2671 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2672 Interval.addRange(LR);