1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
48 // Hidden options for help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
54 static cl::opt<bool> EnableFastSpilling("fast-spill",
55 cl::init(false), cl::Hidden);
// Early coalescing is off by default. The limit defaults to -1
// (presumably "unlimited" — confirm at the use site).
57 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
59 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
// Pass statistics, reported with -stats.
62 STATISTIC(numIntervals , "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
65 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Pass ID and static registration with the PassManager.
67 char LiveIntervals::ID = 0;
68 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires and preserves.
// LiveVariables and PHIElimination are both required and preserved; loop and
// dominator info are preserved so later passes need not recompute them.
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all analysis results: free the LiveInterval objects,
// clear the side tables, reset the VNInfo allocator and delete any cloned
// machine instructions this pass still owns.
88 void LiveIntervals::releaseMemory() {
89 // Free the live intervals themselves.
90 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
91 E = r2iMap_.end(); I != E; ++I)
99 terminatorGaps.clear();
100 phiJoinCopies.clear();
102 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
103 VNInfoAllocator.Reset();
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
107 mf_->DeleteMachineInstr(MI);
// CanTurnIntoImplicitDef - Return true if MI's use of Reg allows MI itself
// to be rewritten as an IMPLICIT_DEF of its destination: a register move
// (presumably when SrcReg == Reg — confirm), an INSERT_SUBREG/SUBREG_TO_REG
// whose inserted value (operand 2) is Reg, or an EXTRACT_SUBREG whose
// source (operand 1) is Reg.
111 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
112 const TargetInstrInfo *tii_) {
113 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
114 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
118 if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
119 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
120 MI->getOperand(2).getReg() == Reg)
122 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
123 MI->getOperand(1).getReg() == Reg)
128 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
129 /// there is one implicit_def for each use. Add isUndef marker to
130 /// implicit_def defs and their uses.
131 void LiveIntervals::processImplicitDefs() {
132 SmallSet<unsigned, 8> ImpDefRegs;
133 SmallVector<MachineInstr*, 8> ImpDefMIs;
134 MachineBasicBlock *Entry = mf_->begin();
135 SmallPtrSet<MachineBasicBlock*,16> Visited;
// Walk blocks in depth-first order from the entry so that implicit_def
// defs are normally visited before their uses.
136 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
137 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
139 MachineBasicBlock *MBB = *DFI;
140 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
142 MachineInstr *MI = &*I;
// Record every implicit_def and the register it defines.
144 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
145 unsigned Reg = MI->getOperand(0).getReg();
146 ImpDefRegs.insert(Reg);
147 ImpDefMIs.push_back(MI);
151 bool ChangedToImpDef = false;
152 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
153 MachineOperand& MO = MI->getOperand(i);
154 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
156 unsigned Reg = MO.getReg();
159 if (!ImpDefRegs.count(Reg))
161 // Use is a copy, just turn it into an implicit_def.
162 if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
163 bool isKill = MO.isKill();
164 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip all operands except the def, leaving a bare implicit_def.
165 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
166 MI->RemoveOperand(j);
168 ImpDefRegs.erase(Reg);
169 ChangedToImpDef = true;
174 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
175 // Make sure other uses of the same register in this instruction
// are considered before the register's implicit-def status is dropped.
176 for (unsigned j = i+1; j != e; ++j) {
177 MachineOperand &MOJ = MI->getOperand(j);
178 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
181 ImpDefRegs.erase(Reg);
185 if (ChangedToImpDef) {
186 // Backtrack to process this new implicit_def.
189 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
190 MachineOperand& MO = MI->getOperand(i);
191 if (!MO.isReg() || !MO.isDef())
// Any (non-implicit_def) def of the register ends its implicit-def status.
193 ImpDefRegs.erase(MO.getReg());
198 // Any outstanding liveout implicit_def's?
199 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
200 MachineInstr *MI = ImpDefMIs[i];
201 unsigned Reg = MI->getOperand(0).getReg();
202 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
203 !ImpDefRegs.count(Reg)) {
204 // Delete all "local" implicit_def's. That includes those which define
205 // physical registers since they cannot be liveout.
206 MI->eraseFromParent();
210 // If there are multiple defs of the same register and at least one
211 // is not an implicit_def, do not insert implicit_def's before the
214 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
215 DE = mri_->def_end(); DI != DE; ++DI) {
216 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
224 // The only implicit_def which we want to keep are those that are live
226 MI->eraseFromParent();
// Rewrite each remaining use of Reg found in other blocks.
228 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
229 UE = mri_->use_end(); UI != UE; ) {
230 MachineOperand &RMO = UI.getOperand();
231 MachineInstr *RMI = &*UI;
233 MachineBasicBlock *RMBB = RMI->getParent();
237 // Turn a copy use into an implicit_def.
238 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
239 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
241 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
242 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
243 RMI->RemoveOperand(j);
// Otherwise give the use a fresh virtual register of the same class.
247 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
248 unsigned NewVReg = mri_->createVirtualRegister(RC);
// computeNumbering - Assign an index to every MachineInstr and
// MachineBasicBlock, leaving empty slots around instructions and a
// "terminator gap" per block, then remap any existing live ranges,
// VNInfo defs, and kill indices from the old numbering to the new one.
260 void LiveIntervals::computeNumbering() {
// Keep the old index maps so existing ranges can be translated below.
261 Index2MiMap OldI2MI = i2miMap_;
262 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
268 terminatorGaps.clear();
269 phiJoinCopies.clear();
273 // Number MachineInstrs and MachineBasicBlocks.
274 // Initialize MBB indexes to a sentinel.
275 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
276 std::make_pair(MachineInstrIndex(),MachineInstrIndex()));
278 MachineInstrIndex MIIndex;
279 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
281 MachineInstrIndex StartIdx = MIIndex;
283 // Insert an empty slot at the beginning of each block.
284 MIIndex = getNextIndex(MIIndex);
285 i2miMap_.push_back(0);
287 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
290 if (I == MBB->getFirstTerminator()) {
291 // Leave a gap before terminators; this is where we will point
293 MachineInstrIndex tGap(true, MIIndex);
295 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
297 "Multiple 'first' terminators encountered during numbering.");
298 inserted = inserted; // Avoid compiler warning if assertions turned off.
299 i2miMap_.push_back(0);
301 MIIndex = getNextIndex(MIIndex);
304 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
305 assert(inserted && "multiple MachineInstr -> index mappings");
307 i2miMap_.push_back(I);
308 MIIndex = getNextIndex(MIIndex);
311 // Insert max(1, numdefs) empty slots after every instruction.
312 unsigned Slots = I->getDesc().getNumDefs();
316 MIIndex = getNextIndex(MIIndex);
317 i2miMap_.push_back(0);
// Blocks with no terminator still need a terminator gap at the end.
322 if (MBB->getFirstTerminator() == MBB->end()) {
323 // Leave a gap before terminators; this is where we will point
325 MachineInstrIndex tGap(true, MIIndex);
327 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
329 "Multiple 'first' terminators encountered during numbering.");
330 inserted = inserted; // Avoid compiler warning if assertions turned off.
331 i2miMap_.push_back(0);
333 MIIndex = getNextIndex(MIIndex);
336 // Set the MBB2IdxMap entry for this MBB.
337 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
338 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
341 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Translate all existing live ranges into the new numbering.
343 if (!OldI2MI.empty())
344 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
345 for (LiveInterval::iterator LI = OI->second->begin(),
346 LE = OI->second->end(); LI != LE; ++LI) {
348 // Remap the start index of the live range to the corresponding new
349 // number, or our best guess at what it _should_ correspond to if the
350 // original instruction has been erased. This is either the following
351 // instruction or its predecessor.
352 unsigned index = LI->start.getVecIndex();
353 MachineInstrIndex::Slot offset = LI->start.getSlot();
354 if (LI->start.isLoad()) {
355 std::vector<IdxMBBPair>::const_iterator I =
356 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
357 // Take the pair containing the index
358 std::vector<IdxMBBPair>::const_iterator J =
359 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
361 LI->start = getMBBStartIdx(J->second);
363 LI->start = MachineInstrIndex(
364 MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
365 (MachineInstrIndex::Slot)offset);
368 // Remap the ending index in the same way that we remapped the start,
369 // except for the final step where we always map to the immediately
370 // following instruction.
371 index = (getPrevSlot(LI->end)).getVecIndex();
372 offset = LI->end.getSlot();
373 if (LI->end.isLoad()) {
374 // VReg dies at end of block.
375 std::vector<IdxMBBPair>::const_iterator I =
376 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
379 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Scan forward over erased instructions to the next surviving one.
381 unsigned idx = index;
382 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
384 if (index != OldI2MI.size())
386 MachineInstrIndex(mi2iMap_[OldI2MI[index]],
387 (idx == index ? offset : MachineInstrIndex::LOAD));
390 MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
394 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
395 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
398 // Remap the VNInfo def index, which works the same as the
399 // start indices above. VN's with special sentinel defs
400 // don't need to be remapped.
401 if (vni->isDefAccurate() && !vni->isUnused()) {
402 unsigned index = vni->def.getVecIndex();
403 MachineInstrIndex::Slot offset = vni->def.getSlot();
404 if (vni->def.isLoad()) {
405 std::vector<IdxMBBPair>::const_iterator I =
406 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
407 // Take the pair containing the index
408 std::vector<IdxMBBPair>::const_iterator J =
409 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
411 vni->def = getMBBStartIdx(J->second);
413 vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
417 // Remap the VNInfo kill indices, which works the same as
418 // the end indices above.
419 for (size_t i = 0; i < vni->kills.size(); ++i) {
420 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
421 MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
423 if (vni->kills[i].isLoad()) {
// NOTE(review): assert on a string literal is always true — this was
// probably meant to be assert(false && "Value killed at a load slot.");
// confirm the intended behavior before relying on it.
424 assert("Value killed at a load slot.");
425 /*std::vector<IdxMBBPair>::const_iterator I =
426 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
429 vni->kills[i] = getMBBEndIdx(I->second);*/
431 if (vni->kills[i].isPHIIndex()) {
432 std::vector<IdxMBBPair>::const_iterator I =
433 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
435 vni->kills[i] = terminatorGaps[I->second];
437 assert(OldI2MI[index] != 0 &&
438 "Kill refers to instruction not present in index maps.");
439 vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
443 unsigned idx = index;
444 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
446 if (index != OldI2MI.size())
447 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
448 (idx == index ? offset : 0);
450 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// scaleNumbering - Multiply every instruction/block index by `factor`,
// updating MBB begin/end points, all live ranges and VNInfo structures,
// and both instruction<->index maps.
458 void LiveIntervals::scaleNumbering(int factor) {
460 // * scale MBB begin and end points
461 // * scale all ranges.
462 // * Update VNI structures.
463 // * Scale instruction numberings
465 // Scale the MBB indices.
467 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
468 MBB != MBBE; ++MBB) {
469 std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
470 mbbIndices.first = mbbIndices.first.scale(factor);
471 mbbIndices.second = mbbIndices.second.scale(factor);
472 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
474 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
476 // Scale terminator gaps.
477 for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
478 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
480 terminatorGaps[TGI->first] = TGI->second.scale(factor);
483 // Scale the intervals.
484 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
485 LI->second->scaleNumbering(factor);
488 // Scale MachineInstrs.
489 Mi2IndexMap oldmi2iMap = mi2iMap_;
490 MachineInstrIndex highestSlot;
491 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
493 MachineInstrIndex newSlot = MI->second.scale(factor);
494 mi2iMap_[MI->first] = newSlot;
495 highestSlot = std::max(highestSlot, newSlot);
// Rebuild the index -> instruction vector from the rescaled map.
498 unsigned highestVIndex = highestSlot.getVecIndex();
500 i2miMap_.resize(highestVIndex + 1);
501 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
503 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
509 /// runOnMachineFunction - Register allocate the whole function
// (Comment is historical: this pass computes live intervals; the actual
// register allocation happens in a later pass.)
511 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target/analysis handles used throughout the pass.
513 mri_ = &mf_->getRegInfo();
514 tm_ = &fn.getTarget();
515 tri_ = tm_->getRegisterInfo();
516 tii_ = tm_->getInstrInfo();
517 aa_ = &getAnalysis<AliasAnalysis>();
518 lv_ = &getAnalysis<LiveVariables>();
519 allocatableRegs_ = tri_->getAllocatableSet(fn);
521 processImplicitDefs();
524 performEarlyCoalescing();
526 numIntervals += getNumIntervals();
532 /// print - Implement the dump method.
// Prints every live interval (register -> ranges/value numbers) to OS.
533 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
534 OS << "********** INTERVALS **********\n";
535 for (const_iterator I = begin(), E = end(); I != E; ++I) {
536 I->second->print(OS, tri_);
// printInstrs - Dump every machine instruction in the function, prefixed
// with its live-interval index, grouped by basic block.
543 void LiveIntervals::printInstrs(raw_ostream &OS) const {
544 OS << "********** MACHINEINSTRS **********\n";
546 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
547 mbbi != mbbe; ++mbbi) {
548 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
549 for (MachineBasicBlock::iterator mii = mbbi->begin(),
550 mie = mbbi->end(); mii != mie; ++mii) {
551 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug helper; presumably forwards to printInstrs(errs()) —
// body not visible here, confirm against the full source.
556 void LiveIntervals::dumpInstrs() const {
560 /// conflictsWithPhysRegDef - Returns true if the specified register
561 /// is defined during the duration of the specified interval.
// Walks every index covered by li's ranges, skipping indices with no
// surviving instruction, and checks each instruction's operands (with
// virtual registers mapped through the VirtRegMap) for an overlap with reg.
562 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
563 VirtRegMap &vrm, unsigned reg) {
564 for (LiveInterval::Ranges::const_iterator
565 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
566 for (MachineInstrIndex index = getBaseIndex(I->start),
567 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
568 index = getNextIndex(index)) {
569 // skip deleted instructions
570 while (index != end && !getInstructionFromIndex(index))
571 index = getNextIndex(index);
572 if (index == end) break;
574 MachineInstr *MI = getInstructionFromIndex(index);
575 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
576 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
577 if (SrcReg == li.reg || DstReg == li.reg)
579 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
580 MachineOperand& mop = MI->getOperand(i);
583 unsigned PhysReg = mop.getReg();
584 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers to their assigned physreg before comparing.
586 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
587 if (!vrm.hasPhys(PhysReg))
589 PhysReg = vrm.getPhys(PhysReg);
591 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
600 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
601 /// it can check use as well.
602 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
603 unsigned Reg, bool CheckUse,
604 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
605 for (LiveInterval::Ranges::const_iterator
606 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
607 for (MachineInstrIndex index = getBaseIndex(I->start),
608 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
609 index = getNextIndex(index)) {
610 // Skip deleted instructions.
611 MachineInstr *MI = 0;
612 while (index != end) {
613 MI = getInstructionFromIndex(index);
616 index = getNextIndex(index);
618 if (index == end) break;
// Copies already joined by the coalescer are not real references.
620 if (JoinedCopies.count(MI))
622 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
623 MachineOperand& MO = MI->getOperand(i);
626 if (MO.isUse() && !CheckUse)
628 unsigned PhysReg = MO.getReg();
629 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
631 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Print a physical register by its target name, or a
// virtual register as "%reg<N>", to errs(). Debug-output helper.
641 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
642 if (TargetRegisterInfo::isPhysicalRegister(reg))
643 errs() << tri_->getName(reg);
645 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Compute (or extend) the live interval for a
// virtual-register def at MIIdx. Handles three cases: the first def of the
// vreg, a redefinition from two-address elimination, and a redefinition
// from PHI elimination.
648 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
649 MachineBasicBlock::iterator mi,
650 MachineInstrIndex MIIdx,
653 LiveInterval &interval) {
655 errs() << "\t\tregister: ";
656 printRegName(interval.reg, tri_);
659 // Virtual registers may be defined multiple times (due to phi
660 // elimination and 2-addr elimination). Much of what we do only has to be
661 // done once for the vreg. We use an empty interval to detect the first
662 // time we see a vreg.
663 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
664 if (interval.empty()) {
665 // Get the Idx of the defining instructions.
666 MachineInstrIndex defIndex = getDefIndex(MIIdx);
667 // Earlyclobbers move back one.
668 if (MO.isEarlyClobber())
669 defIndex = getUseIndex(MIIdx);
// Record the defining copy (if any) so the coalescer can use it later.
671 MachineInstr *CopyMI = NULL;
672 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
673 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
674 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
675 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
676 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
678 // Earlyclobbers move back one.
679 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
681 assert(ValNo->id == 0 && "First value in interval is not 0?");
683 // Loop over all of the blocks that the vreg is defined in. There are
684 // two cases we have to handle here. The most common case is a vreg
685 // whose lifetime is contained within a basic block. In this case there
686 // will be a single kill, in MBB, which comes after the definition.
687 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
688 // FIXME: what about dead vars?
689 MachineInstrIndex killIdx;
690 if (vi.Kills[0] != mi)
691 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
693 killIdx = getNextSlot(defIndex);
695 // If the kill happens after the definition, we have an intra-block
697 if (killIdx > defIndex) {
698 assert(vi.AliveBlocks.empty() &&
699 "Shouldn't be alive across any blocks!");
700 LiveRange LR(defIndex, killIdx, ValNo);
701 interval.addRange(LR);
702 DEBUG(errs() << " +" << LR << "\n");
703 ValNo->addKill(killIdx);
708 // The other case we handle is when a virtual register lives to the end
709 // of the defining block, potentially live across some blocks, then is
710 // live into some number of blocks, but gets killed. Start by adding a
711 // range that goes from this definition to the end of the defining block.
712 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
713 DEBUG(errs() << " +" << NewLR);
714 interval.addRange(NewLR);
716 // Iterate over all of the blocks that the variable is completely
717 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
719 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
720 E = vi.AliveBlocks.end(); I != E; ++I) {
721 LiveRange LR(getMBBStartIdx(*I),
722 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
724 interval.addRange(LR);
725 DEBUG(errs() << " +" << LR);
728 // Finally, this virtual register is live from the start of any killing
729 // block to the 'use' slot of the killing instruction.
730 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
731 MachineInstr *Kill = vi.Kills[i];
732 MachineInstrIndex killIdx =
733 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
734 LiveRange LR(getMBBStartIdx(Kill->getParent()),
736 interval.addRange(LR);
737 ValNo->addKill(killIdx);
738 DEBUG(errs() << " +" << LR);
742 // If this is the second time we see a virtual register definition, it
743 // must be due to phi elimination or two addr elimination. If this is
744 // the result of two address elimination, then the vreg is one of the
745 // def-and-use register operand.
746 if (mi->isRegTiedToUseOperand(MOIdx)) {
747 // If this is a two-address definition, then we have already processed
748 // the live range. The only problem is that we didn't realize there
749 // are actually two values in the live interval. Because of this we
750 // need to take the LiveRegion that defines this register and split it
752 assert(interval.containsOneValue());
753 MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
754 MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
755 if (MO.isEarlyClobber())
756 RedefIndex = getUseIndex(MIIdx);
758 const LiveRange *OldLR =
759 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
760 VNInfo *OldValNo = OldLR->valno;
762 // Delete the initial value, which should be short and continuous,
763 // because the 2-addr copy must be in the same MBB as the redef.
764 interval.removeRange(DefIndex, RedefIndex);
766 // Two-address vregs should always only be redefined once. This means
767 // that at this point, there should be exactly one value number in it.
768 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
770 // The new value number (#1) is defined by the instruction we claimed
772 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
773 false, // update at *
775 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
777 // Value#0 is now defined by the 2-addr instruction.
778 OldValNo->def = RedefIndex;
779 OldValNo->setCopy(0);
780 if (MO.isEarlyClobber())
781 OldValNo->setHasRedefByEC(true);
783 // Add the new live interval which replaces the range for the input copy.
784 LiveRange LR(DefIndex, RedefIndex, ValNo);
785 DEBUG(errs() << " replace range with " << LR);
786 interval.addRange(LR);
787 ValNo->addKill(RedefIndex);
789 // If this redefinition is dead, we need to add a dummy unit live
790 // range covering the def slot.
793 LiveRange(RedefIndex, getNextSlot(RedefIndex), OldValNo));
796 errs() << " RESULT: ";
797 interval.print(errs(), tri_);
800 // Otherwise, this must be because of phi elimination. If this is the
801 // first redefinition of the vreg that we have seen, go back and change
802 // the live range in the PHI block to be a different value number.
803 if (interval.containsOneValue()) {
804 // Remove the old range that we now know has an incorrect number.
805 VNInfo *VNI = interval.getValNumInfo(0);
806 MachineInstr *Killer = vi.Kills[0];
807 phiJoinCopies.push_back(Killer);
808 MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
809 MachineInstrIndex End =
810 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
812 errs() << " Removing [" << Start << "," << End << "] from: ";
813 interval.print(errs(), tri_);
816 interval.removeRange(Start, End);
817 assert(interval.ranges.size() == 1 &&
818 "Newly discovered PHI interval has >1 ranges.");
// The old value is now killed at the terminator gap of its killing block.
819 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
820 VNI->addKill(terminatorGaps[killMBB]);
821 VNI->setHasPHIKill(true);
823 errs() << " RESULT: ";
824 interval.print(errs(), tri_);
827 // Replace the interval with one of a NEW value number. Note that this
828 // value number isn't actually defined by an instruction, weird huh? :)
829 LiveRange LR(Start, End,
830 interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
831 0, false, VNInfoAllocator));
832 LR.valno->setIsPHIDef(true);
833 DEBUG(errs() << " replace range with " << LR);
834 interval.addRange(LR);
835 LR.valno->addKill(End);
837 errs() << " RESULT: ";
838 interval.print(errs(), tri_);
842 // In the case of PHI elimination, each variable definition is only
843 // live until the end of the block. We've already taken care of the
844 // rest of the live range.
845 MachineInstrIndex defIndex = getDefIndex(MIIdx);
846 if (MO.isEarlyClobber())
847 defIndex = getUseIndex(MIIdx);
850 MachineInstr *CopyMI = NULL;
851 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
852 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
853 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
854 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
855 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
857 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
859 MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
860 LiveRange LR(defIndex, killIndex, ValNo);
861 interval.addRange(LR);
862 ValNo->addKill(terminatorGaps[mbb]);
863 ValNo->setHasPHIKill(true);
864 DEBUG(errs() << " +" << LR);
868 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Compute the live range for a physical-register
// def at MIIdx. Physregs cannot live across blocks, so the range ends at the
// first kill, the next redefinition, or the def slot itself if dead.
871 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
872 MachineBasicBlock::iterator mi,
873 MachineInstrIndex MIIdx,
875 LiveInterval &interval,
876 MachineInstr *CopyMI) {
877 // A physical register cannot be live across basic block, so its
878 // lifetime must end somewhere in its defining basic block.
880 errs() << "\t\tregister: ";
881 printRegName(interval.reg, tri_);
884 MachineInstrIndex baseIndex = MIIdx;
885 MachineInstrIndex start = getDefIndex(baseIndex);
886 // Earlyclobbers move back one.
887 if (MO.isEarlyClobber())
888 start = getUseIndex(MIIdx);
889 MachineInstrIndex end = start;
891 // If it is not used after definition, it is considered dead at
892 // the instruction defining it. Hence its interval is:
893 // [defSlot(def), defSlot(def)+1)
895 DEBUG(errs() << " dead");
896 end = getNextSlot(start);
900 // If it is not dead on definition, it must be killed by a
901 // subsequent instruction. Hence its interval is:
902 // [defSlot(def), useSlot(kill)+1)
903 baseIndex = getNextIndex(baseIndex);
904 while (++mi != MBB->end()) {
// Keep baseIndex in sync with mi, skipping indices of erased instructions.
905 while (baseIndex.getVecIndex() < i2miMap_.size() &&
906 getInstructionFromIndex(baseIndex) == 0)
907 baseIndex = getNextIndex(baseIndex);
908 if (mi->killsRegister(interval.reg, tri_)) {
909 DEBUG(errs() << " killed");
910 end = getNextSlot(getUseIndex(baseIndex));
913 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
915 if (mi->isRegTiedToUseOperand(DefIdx)) {
916 // Two-address instruction.
917 end = getDefIndex(baseIndex);
918 if (mi->getOperand(DefIdx).isEarlyClobber())
919 end = getUseIndex(baseIndex);
921 // Another instruction redefines the register before it is ever read.
922 // Then the register is essentially dead at the instruction that defines
923 // it. Hence its interval is:
924 // [defSlot(def), defSlot(def)+1)
925 DEBUG(errs() << " dead");
926 end = getNextSlot(start);
932 baseIndex = getNextIndex(baseIndex);
935 // The only case we should have a dead physreg here without a killing or
936 // instruction where we know it's dead is if it is live-in to the function
937 // and never used. Another possible case is the implicit use of the
938 // physical register has been deleted by two-address pass.
939 end = getNextSlot(start);
942 assert(start < end && "did not find end of interval?");
944 // Already exists? Extend old live interval.
945 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
946 bool Extend = OldLR != interval.end();
947 VNInfo *ValNo = Extend
948 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
949 if (MO.isEarlyClobber() && Extend)
950 ValNo->setHasRedefByEC(true);
951 LiveRange LR(start, end, ValNo);
952 interval.addRange(LR);
953 LR.valno->addKill(end);
954 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler. For an allocatable physreg, also process
// its sub-registers (each with its own interval).
957 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
958 MachineBasicBlock::iterator MI,
959 MachineInstrIndex MIIdx,
962 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
963 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
964 getOrCreateInterval(MO.getReg()));
965 else if (allocatableRegs_[MO.getReg()]) {
// Remember the defining copy (if any) for the physreg interval.
966 MachineInstr *CopyMI = NULL;
967 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
968 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
969 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
970 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
971 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
973 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
974 getOrCreateInterval(MO.getReg()), CopyMI);
975 // Def of a register also defines its sub-registers.
976 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
977 // If MI also modifies the sub-register explicitly, avoid processing it
978 // more than once. Do not pass in TRI here so it checks for exact match.
979 if (!MI->modifiesRegister(*AS))
980 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
981 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create the live range for a register that is
// live-in to MBB (or an alias of one, when isAlias is set): from the block
// start to its first kill, first redefinition, or through the whole block.
985 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
986 MachineInstrIndex MIIdx,
987 LiveInterval &interval, bool isAlias) {
989 errs() << "\t\tlivein register: ";
990 printRegName(interval.reg, tri_);
993 // Look for kills, if it reaches a def before it's killed, then it shouldn't
994 // be considered a livein.
995 MachineBasicBlock::iterator mi = MBB->begin();
996 MachineInstrIndex baseIndex = MIIdx;
997 MachineInstrIndex start = baseIndex;
// Skip indices that no longer map to an instruction.
998 while (baseIndex.getVecIndex() < i2miMap_.size() &&
999 getInstructionFromIndex(baseIndex) == 0)
1000 baseIndex = getNextIndex(baseIndex);
1001 MachineInstrIndex end = baseIndex;
1002 bool SeenDefUse = false;
1004 while (mi != MBB->end()) {
1005 if (mi->killsRegister(interval.reg, tri_)) {
1006 DEBUG(errs() << " killed");
1007 end = getNextSlot(getUseIndex(baseIndex));
1010 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1011 // Another instruction redefines the register before it is ever read.
1012 // Then the register is essentially dead at the instruction that defines
1013 // it. Hence its interval is:
1014 // [defSlot(def), defSlot(def)+1)
1015 DEBUG(errs() << " dead");
1016 end = getNextSlot(getDefIndex(start));
1021 baseIndex = getNextIndex(baseIndex);
1023 if (mi != MBB->end()) {
1024 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1025 getInstructionFromIndex(baseIndex) == 0)
1026 baseIndex = getNextIndex(baseIndex);
1030 // Live-in register might not be used at all.
1033 DEBUG(errs() << " dead");
1034 end = getNextSlot(getDefIndex(MIIdx));
1036 DEBUG(errs() << " live through");
// Live-ins get a PHI-def value number: there is no defining instruction.
1042 interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
1043 0, false, VNInfoAllocator);
1044 vni->setIsPHIDef(true);
1045 LiveRange LR(start, end, vni);
1047 interval.addRange(LR);
1048 LR.valno->addKill(end);
1049 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Classify every move that reads SrcInt.reg into
// "identity" copies (source is DstInt.reg) vs. "other" copies, and decide
// whether early coalescing of the phi join pays off: profitable when the
// identity copies outnumber the others (and, per the visible early-return,
// some conflict condition bails out to the regular coalescer).
// NOTE(review): the return type line (1052) and the continue/early-exit
// lines (1061-1063, 1067, 1071, 1073-1077) are missing from this listing.
1053 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1054 SmallVector<MachineInstr*,16> &IdentCopies,
1055 SmallVector<MachineInstr*,16> &OtherCopies) {
1056 bool HaveConflict = false;
1057 unsigned NumIdent = 0;
1058 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
1059 re = mri_->reg_end(); ri != re; ++ri) {
1060 MachineOperand &O = ri.getOperand();
1064 MachineInstr *MI = &*ri;
1065 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1066 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1068 if (SrcReg != DstInt.reg) {
// A copy from some register other than the phi destination; it
// conflicts if the destination interval is live at that point.
1069 OtherCopies.push_back(MI);
1070 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1072 IdentCopies.push_back(MI);
1078 return false; // Let coalescer handle it
1079 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - Eliminate copies feeding into phi joins whose
// sources are defined by the phi joins themselves: redirect non-identity
// copies to define the phi destination directly, erase would-be identity
// copies, then erase the phi-join move and transfer the source interval's
// range/liveness to the destination interval.
// NOTE(review): the listing drops many lines here (1084-1085, 1091-1092,
// 1095, 1097-1100, 1106-1107, 1111, 1116, 1122, 1131, 1137-1141, 1146,
// 1152, 1161-1162, 1177-1180) -- early returns/continues and the operand
// rewrite of PHISrc -> PHIDst are not fully visible.
1082 void LiveIntervals::performEarlyCoalescing() {
1083 if (!EarlyCoalescing)
1086 /// Perform early coalescing: eliminate copies which feed into phi joins
1087 /// and whose sources are defined by the phi joins.
1088 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1089 MachineInstr *Join = phiJoinCopies[i];
// Respect the -early-coalescing-limit style cap (CoalescingLimit).
1090 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1093 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1094 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1096 assert(isMove && "PHI join instruction must be a move!");
1101 LiveInterval &DstInt = getInterval(PHIDst);
1102 LiveInterval &SrcInt = getInterval(PHISrc);
1103 SmallVector<MachineInstr*, 16> IdentCopies;
1104 SmallVector<MachineInstr*, 16> OtherCopies;
1105 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1108 DEBUG(errs() << "PHI Join: " << *Join);
1109 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1110 VNInfo *VNI = DstInt.getValNumInfo(0);
1112 // Change the non-identity copies to directly target the phi destination.
1113 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1114 MachineInstr *PHICopy = OtherCopies[i];
1115 DEBUG(errs() << "Moving: " << *PHICopy);
1117 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1118 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1119 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1120 MachineInstrIndex StartIndex = SLR->start;
1121 MachineInstrIndex EndIndex = SLR->end;
1123 // Delete val# defined by the now identity copy and add the range from
1124 // beginning of the mbb to the end of the range.
1125 SrcInt.removeValNo(SLR->valno);
1126 DEBUG(errs() << " added range [" << StartIndex << ','
1127 << EndIndex << "] to reg" << DstInt.reg << '\n');
1128 if (DstInt.liveAt(StartIndex))
1129 DstInt.removeRange(StartIndex, EndIndex);
1130 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1132 NewVNI->setHasPHIKill(true);
1133 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's PHISrc operands (rewrite body not visible here).
1134 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1135 MachineOperand &MO = PHICopy->getOperand(j);
1136 if (!MO.isReg() || MO.getReg() != PHISrc)
1142 // Now let's eliminate all the would-be identity copies.
1143 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1144 MachineInstr *PHICopy = IdentCopies[i];
1145 DEBUG(errs() << "Coalescing: " << *PHICopy);
1147 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1148 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1149 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1150 MachineInstrIndex StartIndex = SLR->start;
1151 MachineInstrIndex EndIndex = SLR->end;
1153 // Delete val# defined by the now identity copy and add the range from
1154 // beginning of the mbb to the end of the range.
1155 SrcInt.removeValNo(SLR->valno);
1156 RemoveMachineInstrFromMaps(PHICopy);
1157 PHICopy->eraseFromParent();
1158 DEBUG(errs() << " added range [" << StartIndex << ','
1159 << EndIndex << "] to reg" << DstInt.reg << '\n');
1160 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1163 // Remove the phi join and update the phi block liveness.
1164 MachineInstrIndex MIIndex = getInstructionIndex(Join);
1165 MachineInstrIndex UseIndex = getUseIndex(MIIndex);
1166 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1167 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1168 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The dest value no longer has an accurate defining copy instruction.
1169 DLR->valno->setCopy(0);
1170 DLR->valno->setIsDefAccurate(false);
1171 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1172 SrcInt.removeRange(SLR->start, SLR->end);
1173 assert(SrcInt.empty());
1174 removeInterval(PHISrc);
1175 RemoveMachineInstrFromMaps(Join);
1176 Join->eraseFromParent();
1182 /// computeIntervals - computes the live intervals for virtual
1183 /// registers. for some ordering of the machine instructions [1,N] a
1184 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1185 /// which a variable is live
// Walks every basic block: first handles block live-ins, then visits each
// instruction's register operands to build def intervals, collecting undef
// uses so empty intervals can be created for them afterwards.
// NOTE(review): listing gaps (1190, 1198, 1200, 1209-1211, 1216, 1219-1220,
// 1224-1227, 1231-1232, 1235-1238, 1240, 1245-1247, 1250, 1254-1255) hide
// the isDef check before handleRegisterDef and the slot-advance arithmetic.
1186 void LiveIntervals::computeIntervals() {
1187 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1188 << "********** Function: "
1189 << ((Value*)mf_->getFunction())->getName() << '\n');
1191 SmallVector<unsigned, 8> UndefUses;
1192 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1193 MBBI != E; ++MBBI) {
1194 MachineBasicBlock *MBB = MBBI;
1195 // Track the index of the current machine instr.
1196 MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
1197 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1199 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1201 // Create intervals for live-ins to this BB first.
1202 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1203 LE = MBB->livein_end(); LI != LE; ++LI) {
1204 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1205 // Multiple live-ins can alias the same register.
1206 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1207 if (!hasInterval(*AS))
// Sub-register live-ins are handled as aliases (last arg, not visible).
1208 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1212 // Skip over empty initial indices.
1213 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1214 getInstructionFromIndex(MIIndex) == 0)
1215 MIIndex = getNextIndex(MIIndex);
1217 for (; MI != miEnd; ++MI) {
1218 DEBUG(errs() << MIIndex << "\t" << *MI);
// Iterate operands in reverse so operand removal/renumbering is safe.
1221 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1222 MachineOperand &MO = MI->getOperand(i);
1223 if (!MO.isReg() || !MO.getReg())
1226 // handle register defs - build intervals
1228 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1229 else if (MO.isUndef())
1230 UndefUses.push_back(MO.getReg());
1233 // Skip over the empty slots after each instruction.
1234 unsigned Slots = MI->getDesc().getNumDefs();
1239 MIIndex = getNextIndex(MIIndex);
1241 // Skip over empty indices.
1242 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1243 getInstructionFromIndex(MIIndex) == 0)
1244 MIIndex = getNextIndex(MIIndex);
1248 // Create empty intervals for registers defined by implicit_def's (except
1249 // for those implicit_def that define values which are liveout of their
1251 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1252 unsigned UndefReg = UndefUses[i];
1253 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside [Start, End), using binary search over the sorted
// index->MBB map. Returns whether any block was found (via ResVal;
// the increment/return lines 1266-1272 are missing from this listing).
1257 bool LiveIntervals::findLiveInMBBs(
1258 MachineInstrIndex Start, MachineInstrIndex End,
1259 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1260 std::vector<IdxMBBPair>::const_iterator I =
1261 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1263 bool ResVal = false;
1264 while (I != Idx2MBBMap.end()) {
// Stop once blocks begin at or past End.
1265 if (I->first >= End)
1267 MBBs.push_back(I->second);
// findReachableMBBs - For blocks overlapping [Start, End), collect their
// successors into MBBs. Like findLiveInMBBs, but follows CFG successor
// edges; stops once a block extends past End. Return-value plumbing
// (lines 1279, 1282-1283, 1286, 1290-1294) is missing from this listing.
1274 bool LiveIntervals::findReachableMBBs(
1275 MachineInstrIndex Start, MachineInstrIndex End,
1276 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1277 std::vector<IdxMBBPair>::const_iterator I =
1278 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1280 bool ResVal = false;
1281 while (I != Idx2MBBMap.end()) {
1284 MachineBasicBlock *MBB = I->second;
1285 if (getMBBEndIdx(MBB) > End)
1287 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1288 SE = MBB->succ_end(); SI != SE; ++SI)
1289 MBBs.push_back(*SI);
// createInterval - Allocate a fresh LiveInterval for reg. Physical
// registers get HUGE_VALF spill weight so the allocator never spills them;
// virtual registers start at weight 0.
1296 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1297 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1298 return new LiveInterval(reg, Weight);
1301 /// dupInterval - Duplicate a live interval. The caller is responsible for
1302 /// managing the allocated memory.
// Deep-copies li's ranges and value numbers into a newly created interval
// (the return of NewLI, line 1306, is missing from this listing).
1303 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1304 LiveInterval *NewLI = createInterval(li->reg);
1305 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1309 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1310 /// copy field and returns the source register that defines it.
// Handles the three sub-register pseudo-ops specially: EXTRACT_SUBREG takes
// the source (operand 1, narrowed to the sub-register for physregs);
// INSERT_SUBREG / SUBREG_TO_REG take the inserted value (operand 2);
// otherwise a target move's source register. Returns 0 for a non-copy
// (line 1313 with that return is missing from this listing).
1311 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1312 if (!VNI->getCopy())
1315 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1316 // If it's extracting out of a physical register, return the sub-register.
1317 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1318 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1319 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1321 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1322 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1323 return VNI->getCopy()->getOperand(2).getReg();
1325 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1326 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1328 llvm_unreachable("Unrecognized copy instruction!");
1332 //===----------------------------------------------------------------------===//
1333 // Register allocator hooks.
1336 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1337 /// allow one) virtual register operand, then its uses are implicitly using
1338 /// the register. Returns the virtual register.
// Scans MI's use operands, skipping li.reg itself and allocatable physical
// registers; asserts if more than one candidate is found (RegOp declaration
// at 1341 and the assert condition at 1354 are missing from this listing).
1339 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1340 MachineInstr *MI) const {
1342 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1343 MachineOperand &MO = MI->getOperand(i);
1344 if (!MO.isReg() || !MO.isUse())
1346 unsigned Reg = MO.getReg();
1347 if (Reg == 0 || Reg == li.reg)
1350 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1351 !allocatableRegs_[Reg])
1353 // FIXME: For now, only remat MI with at most one register operand.
1355 "Can't rematerialize instruction with multiple register operand!");
1356 RegOp = MO.getReg();
1364 /// isValNoAvailableAt - Return true if the val# of the specified interval
1365 /// which reaches the given instruction also reaches the specified use index.
// Looks up the value number live at MI's index and checks the live range
// containing UseIdx carries the same value number.
1366 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1367 MachineInstrIndex UseIdx) const {
1368 MachineInstrIndex Index = getInstructionIndex(MI);
1369 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1370 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1371 return UI != li.end() && UI->valno == ValNo;
1374 /// isReMaterializable - Returns true if the definition MI of the specified
1375 /// val# of the specified interval is re-materializable.
// Fast paths: IMPLICIT_DEF and loads from immutable stack slots are always
// rematerializable. Otherwise defers to the target's triviality check, or
// (under -aggressive-remat) performs a conservative target-independent
// analysis of memory operands and register operands. Finally verifies any
// implicit virtual-register use is available at all use points and is not
// about to be spilled.
// NOTE(review): many guard/return lines are dropped from this listing
// (1379-1382, 1384-1386, 1391-1394, 1397, 1401-1402, 1407-1408, 1411-1412,
// 1419, 1421-1422, 1425-1429, 1435, 1437-1441, 1444-1445, 1450, 1453-1455,
// 1459-1463, 1466-1471, 1473, 1480, 1482-1484, 1489-1492).
1376 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1377 const VNInfo *ValNo, MachineInstr *MI,
1378 SmallVectorImpl<LiveInterval*> &SpillIs,
1383 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1387 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1388 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1389 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
1390 // this but remember this is not safe to fold into a two-address
1392 // This is a load from fixed stack slot. It can be rematerialized.
1395 // If the target-specific rules don't identify an instruction as
1396 // being trivially rematerializable, use some target-independent
1398 if (!MI->getDesc().isRematerializable() ||
1399 !tii_->isTriviallyReMaterializable(MI)) {
1400 if (!EnableAggressiveRemat)
1403 // If the instruction accesses memory but the memoperands have been lost,
1404 // we can't analyze it.
1405 const TargetInstrDesc &TID = MI->getDesc();
1406 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1409 // Avoid instructions obviously unsafe for remat.
1410 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1413 // If the instruction accesses memory and the memory could be non-constant,
1414 // assume the instruction is not rematerializable.
1415 for (std::list<MachineMemOperand>::const_iterator
1416 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1417 const MachineMemOperand &MMO = *I;
1418 if (MMO.isVolatile() || MMO.isStore())
1420 const Value *V = MMO.getValue();
1423 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1424 if (!PSV->isConstant(mf_->getFrameInfo()))
// Fall back to alias analysis for IR-level memory values.
1426 } else if (!aa_->pointsToConstantMemory(V))
1430 // If any of the registers accessed are non-constant, conservatively assume
1431 // the instruction is not rematerializable.
1432 unsigned ImpUse = 0;
1433 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1434 const MachineOperand &MO = MI->getOperand(i);
1436 unsigned Reg = MO.getReg();
1439 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1442 // Only allow one def, and that in the first operand.
1443 if (MO.isDef() != (i == 0))
1446 // Only allow constant-valued registers.
1447 bool IsLiveIn = mri_->isLiveIn(Reg);
1448 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1449 E = mri_->def_end();
1451 // For the def, it should be the only def of that register.
1452 if (MO.isDef() && (next(I) != E || IsLiveIn))
1456 // Only allow one use other register use, as that's all the
1457 // remat mechanisms support currently.
1458 if (Reg != li.reg) {
1461 else if (Reg != ImpUse)
1464 // For the use, there should be only one associated def.
1465 if (I != E && (next(I) != E || IsLiveIn))
// Check the implicit use's value is available at every use of li.reg
// that is reached by this val#.
1472 unsigned ImpUse = getReMatImplicitUse(li, MI);
1474 const LiveInterval &ImpLi = getInterval(ImpUse);
1475 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1476 re = mri_->use_end(); ri != re; ++ri) {
1477 MachineInstr *UseMI = &*ri;
1478 MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
1479 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1481 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1485 // If a register operand of the re-materialized instruction is going to
1486 // be spilled next, then it's not legal to re-materialize this instruction.
1487 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1488 if (ImpUse == SpillIs[i]->reg)
1494 /// isReMaterializable - Returns true if the definition MI of the specified
1495 /// val# of the specified interval is re-materializable.
// Convenience overload: delegates to the full overload with dummy
// spill-list and isLoad out-arguments (Dummy2 declared on missing line 1499).
1496 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1497 const VNInfo *ValNo, MachineInstr *MI) {
1498 SmallVector<LiveInterval*, 4> Dummy1;
1500 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1503 /// isReMaterializable - Returns true if every definition of MI of every
1504 /// val# of the specified interval is re-materializable.
// Checks each value number in turn: unused val#s are skipped; val#s without
// an accurate def, or whose defining instruction fails the per-def check,
// make the whole interval non-rematerializable. Accumulates whether any
// def is a load into the (not visible here) isLoad out parameter.
1505 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1506 SmallVectorImpl<LiveInterval*> &SpillIs,
1509 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1511 const VNInfo *VNI = *i;
1512 if (VNI->isUnused())
1513 continue; // Dead val#.
1514 // Is the def for the val# rematerializable?
1515 if (!VNI->isDefAccurate())
1517 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1518 bool DefIsLoad = false;
1520 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1522 isLoad |= DefIsLoad;
1527 /// FilterFoldedOps - Filter out two-address use operands. Return
1528 /// true if it finds any issue with the operands that ought to prevent
// Accumulates a VirtRegMap::ModRef mask in MRInfo (isMod for defs, isRef
// for uses, isModRef for tied two-address operands) and collects the
// foldable operand indices into FoldOps.
1530 static bool FilterFoldedOps(MachineInstr *MI,
1531 SmallVector<unsigned, 2> &Ops,
1533 SmallVector<unsigned, 2> &FoldOps) {
1535 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1536 unsigned OpIdx = Ops[i];
1537 MachineOperand &MO = MI->getOperand(OpIdx);
1538 // FIXME: fold subreg use.
1542 MRInfo |= (unsigned)VirtRegMap::isMod;
1544 // Filter out two-address use operand(s).
1545 if (MI->isRegTiedToDefOperand(OpIdx)) {
1546 MRInfo = VirtRegMap::isModRef;
1549 MRInfo |= (unsigned)VirtRegMap::isRef;
1551 FoldOps.push_back(OpIdx);
1557 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1558 /// slot / to reg or any rematerialized load into ith operand of specified
1559 /// MI. If it is successul, MI is updated with the newly created MI and
// ... returns true (tail of comment on missing line 1560). On success the
// folded instruction replaces MI in the block and in the index maps, and
// VirtRegMap bookkeeping (spill points, restore points, emergency spills)
// is transferred to it.
1561 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1562 VirtRegMap &vrm, MachineInstr *DefMI,
1563 MachineInstrIndex InstrIdx,
1564 SmallVector<unsigned, 2> &Ops,
1565 bool isSS, int Slot, unsigned Reg) {
1566 // If it is an implicit def instruction, just delete it.
1567 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1568 RemoveMachineInstrFromMaps(MI);
1569 vrm.RemoveMachineInstrFromMaps(MI);
1570 MI->eraseFromParent();
1575 // Filter the list of operand indexes that are to be folded. Abort if
1576 // any operand will prevent folding.
1577 unsigned MRInfo = 0;
1578 SmallVector<unsigned, 2> FoldOps;
1579 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1582 // The only time it's safe to fold into a two address instruction is when
1583 // it's folding reload and spill from / into a spill stack slot.
1584 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/reload) or the remat def instruction.
1587 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1588 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1590 // Remember this instruction uses the spill slot.
1591 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1593 // Attempt to fold the memory reference into the instruction. If
1594 // we can do this, we don't need to insert spill code.
1595 MachineBasicBlock &MBB = *MI->getParent();
1596 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1597 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1598 vrm.transferSpillPts(MI, fmi);
1599 vrm.transferRestorePts(MI, fmi);
1600 vrm.transferEmergencySpills(MI, fmi);
// Rebind the instruction index to the folded instruction and splice it
// into the block in place of the original MI.
1602 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1603 mi2iMap_[fmi] = InstrIdx;
1604 MI = MBB.insert(MBB.erase(MI), fmi);
1611 /// canFoldMemoryOperand - Returns true if the specified load / store
1612 /// folding is possible.
// Mirrors tryFoldMemoryOperand's legality checks without performing the
// fold: filters the operand list, rejects remat of a modified operand,
// then asks the target whether the fold is supported.
1613 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1614 SmallVector<unsigned, 2> &Ops,
1616 // Filter the list of operand indexes that are to be folded. Abort if
1617 // any operand will prevent folding.
1618 unsigned MRInfo = 0;
1619 SmallVector<unsigned, 2> FoldOps;
1620 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1623 // It's only legal to remat for a use, not a def.
1624 if (ReMat && (MRInfo & VirtRegMap::isMod))
1627 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of li lies inside a
// single basic block: bails out (returns, on lines not visible here) as
// soon as a range crosses a block boundary or a second block is seen.
1630 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1631 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1632 for (LiveInterval::Ranges::const_iterator
1633 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the block whose start index is closest at-or-after this range.
1634 std::vector<IdxMBBPair>::const_iterator II =
1635 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1636 if (II == Idx2MBBMap.end())
1638 if (I->end > II->first) // crossing a MBB.
1640 MBBs.insert(II->second);
1641 if (MBBs.size() > 1)
1647 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1648 /// interval on to-be re-materialized operands of MI) with new register.
1649 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1650 MachineInstr *MI, unsigned NewVReg,
1652 // There is an implicit use. That means one of the other operand is
1653 // being remat'ed and the remat'ed instruction has li.reg as an
1654 // use operand. Make sure we rewrite that as well.
1655 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1656 MachineOperand &MO = MI->getOperand(i);
1659 unsigned Reg = MO.getReg();
1660 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1662 if (!vrm.isReMaterialized(Reg))
// Patch li.reg inside the cached remat instruction to the new vreg.
1664 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1665 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1667 UseMO->setReg(NewVReg);
1671 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1672 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands from li.reg to a (possibly new)
// virtual register NewVReg, attempting fold/remat first, then creates or
// extends the spill interval for NewVReg. Returns whether all rewritten
// operands could have been folded (CanFold).
// NOTE(review): this listing drops many lines in this function (e.g.
// 1688, 1691-1699, 1703, 1708, 1712-1714, 1718-1724, 1727-1736, 1740,
// 1743-1749, 1753-1756, 1760-1770, 1773, 1778-1792, 1795-1797, 1801,
// 1808-1811, 1816-1827, 1834-1840, 1846-1850, 1854-1857, 1863-1866,
// 1869-1874, 1877-1880) -- guards, continues, and the RestartInstruction
// label context are only partially visible.
1673 bool LiveIntervals::
1674 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1675 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1677 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1678 unsigned Slot, int LdSlot,
1679 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1681 const TargetRegisterClass* rc,
1682 SmallVector<int, 4> &ReMatIds,
1683 const MachineLoopInfo *loopInfo,
1684 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1685 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1686 std::vector<LiveInterval*> &NewLIs) {
1687 bool CanFold = false;
1689 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1690 MachineOperand& mop = MI->getOperand(i);
1693 unsigned Reg = mop.getReg();
1694 unsigned RegI = Reg;
1695 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1700 bool TryFold = !DefIsReMat;
1701 bool FoldSS = true; // Default behavior unless it's a remat.
1702 int FoldSlot = Slot;
1704 // If this is the rematerializable definition MI itself and
1705 // all of its uses are rematerialized, simply delete it.
1706 if (MI == ReMatOrigDefMI && CanDelete) {
1707 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1709 RemoveMachineInstrFromMaps(MI);
1710 vrm.RemoveMachineInstrFromMaps(MI);
1711 MI->eraseFromParent();
1715 // If def for this use can't be rematerialized, then try folding.
1716 // If def is rematerializable and it's a load, also try folding.
1717 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1719 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1725 // Scan all of the operands of this instruction rewriting operands
1726 // to use NewVReg instead of li.reg as appropriate. We do this for
1729 // 1. If the instr reads the same spilled vreg multiple times, we
1730 // want to reuse the NewVReg.
1731 // 2. If the instr is a two-addr instruction, we are required to
1732 // keep the src/dst regs pinned.
1734 // Keep track of whether we replace a use and/or def so that we can
1735 // create the spill interval with the appropriate range.
1737 HasUse = mop.isUse();
1738 HasDef = mop.isDef();
1739 SmallVector<unsigned, 2> Ops;
// Gather all later operands of MI that also reference li.reg so they
// are rewritten together with this one.
1741 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1742 const MachineOperand &MOj = MI->getOperand(j);
1745 unsigned RegJ = MOj.getReg();
1746 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1750 if (!MOj.isUndef()) {
1751 HasUse |= MOj.isUse();
1752 HasDef |= MOj.isDef();
1757 // Create a new virtual register for the spill interval.
1758 // Create the new register now so we can map the fold instruction
1759 // to the new register so when it is unfolded we get the correct
1761 bool CreatedNewVReg = false;
1763 NewVReg = mri_->createVirtualRegister(rc);
1765 CreatedNewVReg = true;
1771 // Do not fold load / store here if we are splitting. We'll find an
1772 // optimal point to insert a load / store later.
1774 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1775 Ops, FoldSS, FoldSlot, NewVReg)) {
1776 // Folding the load/store can completely change the instruction in
1777 // unpredictable ways, rescan it from the beginning.
1780 // We need to give the new vreg the same stack slot as the
1781 // spilled interval.
1782 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1788 if (isNotInMIMap(MI))
1790 goto RestartInstruction;
1793 // We'll try to fold it later if it's profitable.
1794 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1798 mop.setReg(NewVReg);
1799 if (mop.isImplicit())
1800 rewriteImplicitOps(li, MI, NewVReg, vrm);
1802 // Reuse NewVReg for other reads.
1803 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1804 MachineOperand &mopj = MI->getOperand(Ops[j]);
1805 mopj.setReg(NewVReg);
1806 if (mopj.isImplicit())
1807 rewriteImplicitOps(li, MI, NewVReg, vrm);
1810 if (CreatedNewVReg) {
1812 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1813 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1814 // Each valnum may have its own remat id.
1815 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1817 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1819 if (!CanDelete || (HasUse && HasDef)) {
1820 // If this is a two-addr instruction then its use operands are
1821 // rematerializable but its def is not. It should be assigned a
1823 vrm.assignVirt2StackSlot(NewVReg, Slot);
1826 vrm.assignVirt2StackSlot(NewVReg, Slot);
1828 } else if (HasUse && HasDef &&
1829 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1830 // If this interval hasn't been assigned a stack slot (because earlier
1831 // def is a deleted remat def), do it now.
1832 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1833 vrm.assignVirt2StackSlot(NewVReg, Slot);
1836 // Re-matting an instruction with virtual register use. Add the
1837 // register as an implicit use on the use MI.
1838 if (DefIsReMat && ImpUse)
1839 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1841 // Create a new register interval for this spill / remat.
1842 LiveInterval &nI = getOrCreateInterval(NewVReg);
1843 if (CreatedNewVReg) {
1844 NewLIs.push_back(&nI);
1845 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1847 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add live ranges to the new interval: use range (load slot to past the
// use slot) and/or def range (def slot to store slot).
1851 if (CreatedNewVReg) {
1852 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1853 nI.getNextValue(MachineInstrIndex(), 0, false,
1855 DEBUG(errs() << " +" << LR);
1858 // Extend the split live interval to this def / use.
1859 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1860 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1861 nI.getValNumInfo(nI.getNumValNums()-1));
1862 DEBUG(errs() << " +" << LR);
1867 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1868 nI.getNextValue(MachineInstrIndex(), 0, false,
1870 DEBUG(errs() << " +" << LR);
1875 errs() << "\t\t\t\tAdded new interval: ";
1876 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if the given value number (VNI, whose
// declaration line 1883 is missing from this listing) has a non-PHI kill
// inside MBB strictly after Idx and before the block's end index.
1882 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1884 MachineBasicBlock *MBB,
1885 MachineInstrIndex Idx) const {
1886 MachineInstrIndex End = getMBBEndIdx(MBB);
1887 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are block boundaries, not real kill instructions -- skip.
1888 if (VNI->kills[j].isPHIIndex())
1891 MachineInstrIndex KillIdx = VNI->kills[j];
1892 if (KillIdx > Idx && KillIdx < End)
1898 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1899 /// during spilling.
// Pairs an instruction with its index plus whether it uses and/or defines
// the spilled register (the MI/HasUse/HasDef member declarations on lines
// 1903-1905 are missing from this listing).
1901 struct RewriteInfo {
1902 MachineInstrIndex Index;
1906 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1907 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index, used to sort RewriteInfo
// entries before rewriting defs and uses in program order.
1910 struct RewriteInfoCompare {
1911 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1912 return LHS.Index < RHS.Index;
1917 void LiveIntervals::
1918 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1919 LiveInterval::Ranges::const_iterator &I,
1920 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1921 unsigned Slot, int LdSlot,
1922 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1924 const TargetRegisterClass* rc,
1925 SmallVector<int, 4> &ReMatIds,
1926 const MachineLoopInfo *loopInfo,
1927 BitVector &SpillMBBs,
1928 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1929 BitVector &RestoreMBBs,
1930 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1931 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1932 std::vector<LiveInterval*> &NewLIs) {
1933 bool AllCanFold = true;
1934 unsigned NewVReg = 0;
1935 MachineInstrIndex start = getBaseIndex(I->start);
1936 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1938 // First collect all the def / use in this live range that will be rewritten.
1939 // Make sure they are sorted according to instruction index.
1940 std::vector<RewriteInfo> RewriteMIs;
1941 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1942 re = mri_->reg_end(); ri != re; ) {
1943 MachineInstr *MI = &*ri;
1944 MachineOperand &O = ri.getOperand();
1946 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1947 MachineInstrIndex index = getInstructionIndex(MI);
1948 if (index < start || index >= end)
1952 // Must be defined by an implicit def. It should not be spilled. Note,
1953 // this is for correctness reason. e.g.
1954 // 8 %reg1024<def> = IMPLICIT_DEF
1955 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1956 // The live range [12, 14) are not part of the r1024 live interval since
1957 // it's defined by an implicit def. It will not conflicts with live
1958 // interval of r1025. Now suppose both registers are spilled, you can
1959 // easily see a situation where both registers are reloaded before
1960 // the INSERT_SUBREG and both target registers that would overlap.
1962 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1964 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1966 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1967 // Now rewrite the defs and uses.
1968 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1969 RewriteInfo &rwi = RewriteMIs[i];
1971 MachineInstrIndex index = rwi.Index;
1972 bool MIHasUse = rwi.HasUse;
1973 bool MIHasDef = rwi.HasDef;
1974 MachineInstr *MI = rwi.MI;
1975 // If MI def and/or use the same register multiple times, then there
1976 // are multiple entries.
1977 unsigned NumUses = MIHasUse;
1978 while (i != e && RewriteMIs[i].MI == MI) {
1979 assert(RewriteMIs[i].Index == index);
1980 bool isUse = RewriteMIs[i].HasUse;
1981 if (isUse) ++NumUses;
1983 MIHasDef |= RewriteMIs[i].HasDef;
1986 MachineBasicBlock *MBB = MI->getParent();
1988 if (ImpUse && MI != ReMatDefMI) {
1989 // Re-matting an instruction with virtual register use. Update the
1990 // register interval's spill weight to HUGE_VALF to prevent it from
1992 LiveInterval &ImpLi = getInterval(ImpUse);
1993 ImpLi.weight = HUGE_VALF;
1996 unsigned MBBId = MBB->getNumber();
1997 unsigned ThisVReg = 0;
1999 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2000 if (NVI != MBBVRegsMap.end()) {
2001 ThisVReg = NVI->second;
2008 // It's better to start a new interval to avoid artifically
2009 // extend the new interval.
2010 if (MIHasDef && !MIHasUse) {
2011 MBBVRegsMap.erase(MBB->getNumber());
2017 bool IsNew = ThisVReg == 0;
2019 // This ends the previous live interval. If all of its def / use
2020 // can be folded, give it a low spill weight.
2021 if (NewVReg && TrySplit && AllCanFold) {
2022 LiveInterval &nI = getOrCreateInterval(NewVReg);
2029 bool HasDef = false;
2030 bool HasUse = false;
2031 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2032 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2033 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2034 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2035 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2036 if (!HasDef && !HasUse)
2039 AllCanFold &= CanFold;
2041 // Update weight of spill interval.
2042 LiveInterval &nI = getOrCreateInterval(NewVReg);
2044 // The spill weight is now infinity as it cannot be spilled again.
2045 nI.weight = HUGE_VALF;
2049 // Keep track of the last def and first use in each MBB.
2051 if (MI != ReMatOrigDefMI || !CanDelete) {
2052 bool HasKill = false;
2054 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2056 // If this is a two-address code, then this index starts a new VNInfo.
2057 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2059 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2061 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2062 SpillIdxes.find(MBBId);
2064 if (SII == SpillIdxes.end()) {
2065 std::vector<SRInfo> S;
2066 S.push_back(SRInfo(index, NewVReg, true));
2067 SpillIdxes.insert(std::make_pair(MBBId, S));
2068 } else if (SII->second.back().vreg != NewVReg) {
2069 SII->second.push_back(SRInfo(index, NewVReg, true));
2070 } else if (index > SII->second.back().index) {
2071 // If there is an earlier def and this is a two-address
2072 // instruction, then it's not possible to fold the store (which
2073 // would also fold the load).
2074 SRInfo &Info = SII->second.back();
2076 Info.canFold = !HasUse;
2078 SpillMBBs.set(MBBId);
2079 } else if (SII != SpillIdxes.end() &&
2080 SII->second.back().vreg == NewVReg &&
2081 index > SII->second.back().index) {
2082 // There is an earlier def that's not killed (must be two-address).
2083 // The spill is no longer needed.
2084 SII->second.pop_back();
2085 if (SII->second.empty()) {
2086 SpillIdxes.erase(MBBId);
2087 SpillMBBs.reset(MBBId);
2094 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2095 SpillIdxes.find(MBBId);
2096 if (SII != SpillIdxes.end() &&
2097 SII->second.back().vreg == NewVReg &&
2098 index > SII->second.back().index)
2099 // Use(s) following the last def, it's not safe to fold the spill.
2100 SII->second.back().canFold = false;
2101 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2102 RestoreIdxes.find(MBBId);
2103 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2104 // If we are splitting live intervals, only fold if it's the first
2105 // use and there isn't another use later in the MBB.
2106 RII->second.back().canFold = false;
2108 // Only need a reload if there isn't an earlier def / use.
2109 if (RII == RestoreIdxes.end()) {
2110 std::vector<SRInfo> Infos;
2111 Infos.push_back(SRInfo(index, NewVReg, true));
2112 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2114 RII->second.push_back(SRInfo(index, NewVReg, true));
2116 RestoreMBBs.set(MBBId);
2120 // Update spill weight.
2121 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2122 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2125 if (NewVReg && TrySplit && AllCanFold) {
2126 // If all of its def / use can be folded, give it a low spill weight.
2127 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if basic block 'Id' has a pending restore
// record for vreg 'vr' at instruction 'index' that is still marked foldable,
// i.e. the store currently being folded could also fold that restore.
// NOTE(review): this listing has extraction gaps — the early-exit result and
// the final return of the scan are not visible here; comments only, code
// tokens untouched.
2132 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2133 unsigned vr, BitVector &RestoreMBBs,
2134 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this MBB at all.
2135 if (!RestoreMBBs[Id])
2137 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan is fine: per-MBB restore lists are short.
2138 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// Match on (index, vreg) and require the record to still be foldable.
2139 if (Restores[i].index == index &&
2140 Restores[i].vreg == vr &&
2141 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore record(s) for vreg 'vr' at
// instruction 'index' in basic block 'Id'.  Records are not removed from the
// vector; their index is reset to the default (invalid) MachineInstrIndex,
// which the restore-emission loop later skips.
2146 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2147 unsigned vr, BitVector &RestoreMBBs,
2148 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// Nothing recorded for this MBB.
2149 if (!RestoreMBBs[Id])
2151 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2152 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the sibling alsoFoldARestore compares 'Restores[i].vreg == vr';
// the bare 'Restores[i].vreg' here looks like a garbled comparison against
// 'vr' — confirm against the upstream source.
2153 if (Restores[i].index == index && Restores[i].vreg)
// Mark invalid: a default-constructed MachineInstrIndex acts as the sentinel.
2154 Restores[i].index = MachineInstrIndex();
2157 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2158 /// spilled and create empty intervals for their uses.
2160 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2161 const TargetRegisterClass* rc,
2162 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining def/use of li.reg.  The iterator is not advanced in
// the for-header; the (not visible in this listing) loop body advances it
// before any instruction is erased so erasure cannot invalidate it.
2163 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2164 re = mri_->reg_end(); ri != re; ) {
2165 MachineOperand &O = ri.getOperand();
2166 MachineInstr *MI = &*ri;
// Def case: after spilling, the only defs of li.reg that may remain are
// IMPLICIT_DEFs — anything else should have been rewritten to a new vreg.
// Delete the IMPLICIT_DEF from both the index maps and the function.
2169 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2170 "Register def was not rewritten?");
2171 RemoveMachineInstrFromMaps(MI);
2172 vrm.RemoveMachineInstrFromMaps(MI);
2173 MI->eraseFromParent();
2175 // This must be an use of an implicit_def so it's not part of the live
2176 // interval. Create a new empty live interval for it.
2177 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2178 unsigned NewVReg = mri_->createVirtualRegister(rc);
// Record that the fresh vreg is implicitly defined so the rewriter/spiller
// knows no load is needed for it; its interval stays empty.
2180 vrm.setIsImplicitlyDefined(NewVReg);
2181 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this instruction that still names li.reg to the
// new vreg (rewrite continues on lines dropped from this listing).
2182 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2183 MachineOperand &MO = MI->getOperand(i);
2184 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simplified spiller used when -fast-spill is
// enabled.  For every instruction touching li.reg it either folds the stack
// slot directly into the instruction or rewrites the operands to a fresh
// vreg with a tiny interval around that single instruction, recording spill
// and restore points with the VirtRegMap.  Returns the new intervals.
// NOTE(review): extraction dropped several lines (loop-exit bookkeeping,
// else-branches, closing braces); comments only, code tokens untouched.
2193 std::vector<LiveInterval*> LiveIntervals::
2194 addIntervalsForSpillsFast(const LiveInterval &li,
2195 const MachineLoopInfo *loopInfo,
// All pieces of the spilled interval share one stack slot.
2197 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2199 std::vector<LiveInterval*> added;
// An interval whose weight is already HUGE_VALF is a spill product; spilling
// it again would loop forever.
2201 assert(li.weight != HUGE_VALF &&
2202 "attempt to spill already spilled interval!");
2205 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2210 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Iterate def/use list; the iterator is re-fetched after rewrites below
// because operand rewriting mutates the use-def chains being walked.
2212 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2213 while (RI != mri_->reg_end()) {
2214 MachineInstr* MI = &*RI;
// Collect every operand index of MI that refers to li.reg, and whether the
// instruction reads and/or writes the register.
2216 SmallVector<unsigned, 2> Indices;
2217 bool HasUse = false;
2218 bool HasDef = false;
2220 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2221 MachineOperand& mop = MI->getOperand(i);
2222 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2224 HasUse |= MI->getOperand(i).isUse();
2225 HasDef |= MI->getOperand(i).isDef();
2227 Indices.push_back(i);
// First choice: fold the stack slot into MI itself (no new vreg needed).
2230 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2231 Indices, true, slot, li.reg)) {
// Folding failed: rewrite to a fresh vreg mapped to the same slot.
2232 unsigned NewVReg = mri_->createVirtualRegister(rc);
2234 vrm.assignVirt2StackSlot(NewVReg, slot);
2236 // create a new register for this spill
2237 LiveInterval &nI = getOrCreateInterval(NewVReg);
2239 // the spill weight is now infinity as it
2240 // cannot be spilled again
2241 nI.weight = HUGE_VALF;
2243 // Rewrite register operands to use the new vreg.
2244 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2245 E = Indices.end(); I != E; ++I) {
2246 MI->getOperand(*I).setReg(NewVReg);
// The interval covers only this instruction, so any use is a kill.
2248 if (MI->getOperand(*I).isUse())
2249 MI->getOperand(*I).setIsKill(true);
2252 // Fill in the new live interval.
2253 MachineInstrIndex index = getInstructionIndex(MI);
// Use side: live from the load slot up to the use slot, then reloaded
// before MI (restore point).
2255 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2256 nI.getNextValue(MachineInstrIndex(), 0, false,
2257 getVNInfoAllocator()));
2258 DEBUG(errs() << " +" << LR);
2260 vrm.addRestorePoint(NewVReg, MI);
// Def side: live from the def slot to the store slot, then spilled after
// MI (spill point).
2263 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2264 nI.getNextValue(MachineInstrIndex(), 0, false,
2265 getVNInfoAllocator()));
2266 DEBUG(errs() << " +" << LR);
2268 vrm.addSpillPoint(NewVReg, true, MI);
2271 added.push_back(&nI);
2274 errs() << "\t\t\t\tadded new interval: ";
// Restart the walk: the rewrite above removed MI from li.reg's use list.
2281 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - The main (non-fast) spiller driver.  Decides, per
// value number, whether the def can be rematerialized instead of reloaded,
// assigns a stack slot when one is needed, rewrites all defs/uses of li into
// fresh per-segment intervals, then emits/folds the actual spill and restore
// points collected per basic block.  Returns the finalized new intervals.
// NOTE(review): this extraction dropped many interior lines (else-branches,
// braces, DEBUG statements, some declarations such as 'LdSlot', 'dummy' and
// 'CloneMIs'); the control flow annotated below follows the visible code
// only — code tokens are untouched.
2287 std::vector<LiveInterval*> LiveIntervals::
2288 addIntervalsForSpills(const LiveInterval &li,
2289 SmallVectorImpl<LiveInterval*> &SpillIs,
2290 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Honor the -fast-spill flag by delegating to the simplified spiller.
2292 if (EnableFastSpilling)
2293 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2295 assert(li.weight != HUGE_VALF &&
2296 "attempt to spill already spilled interval!");
2299 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2300 li.print(errs(), tri_);
2304 // Each bit specify whether a spill is required in the MBB.
2305 BitVector SpillMBBs(mf_->getNumBlockIDs());
2306 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2307 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2308 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
// Maps MBB number -> the new vreg currently live in that block (used by the
// rewriter to reuse a vreg within one block when splitting).
2309 DenseMap<unsigned,unsigned> MBBVRegsMap;
2310 std::vector<LiveInterval*> NewLIs;
2311 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo::id.
2313 unsigned NumValNums = li.getNumValNums();
2314 SmallVector<MachineInstr*, 4> ReMatDefs;
2315 ReMatDefs.resize(NumValNums, NULL);
2316 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2317 ReMatOrigDefs.resize(NumValNums, NULL);
2318 SmallVector<int, 4> ReMatIds;
2319 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2320 BitVector ReMatDelete(NumValNums);
// MAX_STACK_SLOT doubles as the "no slot assigned" sentinel.
2321 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2323 // Spilling a split live interval. It cannot be split any further. Also,
2324 // it's also guaranteed to be a single val# / range interval.
2325 if (vrm.getPreSplitReg(li.reg)) {
2326 vrm.setIsSplitFromReg(li.reg, 0);
2327 // Unset the split kill marker on the last use.
2328 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2329 if (KillIdx != MachineInstrIndex()) {
2330 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2331 assert(KillMI && "Last use disappeared?");
2332 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2333 assert(KillOp != -1 && "Last use disappeared?");
2334 KillMI->getOperand(KillOp).setIsKill(false);
2336 vrm.removeKillPoint(li.reg);
// The pre-split reg already carries its remat/slot decisions in the map.
2337 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2338 Slot = vrm.getStackSlot(li.reg);
2339 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2340 MachineInstr *ReMatDefMI = DefIsReMat ?
2341 vrm.getReMaterializedMI(li.reg) : NULL;
2343 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2344 bool isLoad = isLoadSS ||
2345 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2346 bool IsFirstRange = true;
2347 for (LiveInterval::Ranges::const_iterator
2348 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2349 // If this is a split live interval with multiple ranges, it means there
2350 // are two-address instructions that re-defined the value. Only the
2351 // first def can be rematerialized!
2353 // Note ReMatOrigDefMI has already been deleted.
2354 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2355 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2356 false, vrm, rc, ReMatIds, loopInfo,
2357 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2358 MBBVRegsMap, NewLIs);
// Non-first ranges: plain reload from the slot, no remat possible.
2360 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2361 Slot, 0, false, false, false,
2362 false, vrm, rc, ReMatIds, loopInfo,
2363 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2364 MBBVRegsMap, NewLIs);
2366 IsFirstRange = false;
2369 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// ---- Normal (non-pre-split) path from here on. ----
// Only consider splitting if the interval spans more than one MBB.
2373 bool TrySplit = !intervalIsInOneMBB(li);
2376 bool NeedStackSlot = false;
// Pass 1: decide, per value number, remat-ability and whether a slot is
// needed at all.
2377 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2379 const VNInfo *VNI = *i;
2380 unsigned VN = VNI->id;
2381 if (VNI->isUnused())
2382 continue; // Dead val#.
2383 // Is the def for the val# rematerializable?
2384 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2385 ? getInstructionFromIndex(VNI->def) : 0;
2387 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2388 // Remember how to remat the def of this val#.
2389 ReMatOrigDefs[VN] = ReMatDefMI;
2390 // Original def may be modified so we have to make a copy here.
2391 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2392 CloneMIs.push_back(Clone);
2393 ReMatDefs[VN] = Clone;
2395 bool CanDelete = true;
2396 if (VNI->hasPHIKill()) {
2397 // A kill is a phi node, not all of its uses can be rematerialized.
2398 // It must not be deleted.
2400 // Need a stack slot if there is any live range where uses cannot be
2402 NeedStackSlot = true;
// Fully rematerializable value: the original def can be deleted.
2405 ReMatDelete.set(VN);
2407 // Need a stack slot if there is any live range where uses cannot be
2409 NeedStackSlot = true;
2413 // One stack slot per live interval.
2414 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2415 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2416 Slot = vrm.assignVirt2StackSlot(li.reg);
2418 // This case only occurs when the prealloc splitter has already assigned
2419 // a stack slot to this vreg.
2421 Slot = vrm.getStackSlot(li.reg);
2424 // Create new intervals and rewrite defs and uses.
2425 for (LiveInterval::Ranges::const_iterator
2426 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2427 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2428 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2429 bool DefIsReMat = ReMatDefMI != NULL;
2430 bool CanDelete = ReMatDelete[I->valno->id];
2432 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2433 bool isLoad = isLoadSS ||
2434 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2435 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2436 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2437 CanDelete, vrm, rc, ReMatIds, loopInfo,
2438 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2439 MBBVRegsMap, NewLIs);
2442 // Insert spills / restores if we are splitting.
2444 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Pass 2a: emit or fold the spills recorded per MBB.
2448 SmallPtrSet<LiveInterval*, 4> AddedKill;
2449 SmallVector<unsigned, 2> Ops;
2450 if (NeedStackSlot) {
// Iterate set bits of SpillMBBs (loop-continuation lines not visible).
2451 int Id = SpillMBBs.find_first();
2453 std::vector<SRInfo> &spills = SpillIdxes[Id];
2454 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2455 MachineInstrIndex index = spills[i].index;
2456 unsigned VReg = spills[i].vreg;
2457 LiveInterval &nI = getOrCreateInterval(VReg);
2458 bool isReMat = vrm.isReMaterialized(VReg);
2459 MachineInstr *MI = getInstructionFromIndex(index);
2460 bool CanFold = false;
2461 bool FoundUse = false;
2463 if (spills[i].canFold) {
// Collect the operand indices of VReg in MI to attempt store folding.
2465 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2466 MachineOperand &MO = MI->getOperand(j);
2467 if (!MO.isReg() || MO.getReg() != VReg)
2474 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2475 RestoreMBBs, RestoreIdxes))) {
2476 // MI has two-address uses of the same register. If the use
2477 // isn't the first and only use in the BB, then we can't fold
2478 // it. FIXME: Move this to rewriteInstructionsForSpills.
2485 // Fold the store into the def if possible.
2486 bool Folded = false;
2487 if (CanFold && !Ops.empty()) {
2488 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2491 // Also folded uses, do not issue a load.
2492 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2493 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2495 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2499 // Otherwise tell the spiller to issue a spill.
// The def is a kill for the new interval iff its range ends exactly at
// the store slot of this instruction.
2501 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2502 bool isKill = LR->end == getStoreIndex(index);
2503 if (!MI->registerDefIsDead(nI.reg))
2504 // No need to spill a dead def.
2505 vrm.addSpillPoint(VReg, isKill, MI);
2507 AddedKill.insert(&nI);
2510 Id = SpillMBBs.find_next(Id);
// Pass 2b: emit or fold the restores recorded per MBB.
2514 int Id = RestoreMBBs.find_first();
2516 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2517 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2518 MachineInstrIndex index = restores[i].index;
// Default-constructed index marks a record erased by eraseRestoreInfo.
2519 if (index == MachineInstrIndex())
2521 unsigned VReg = restores[i].vreg;
2522 LiveInterval &nI = getOrCreateInterval(VReg);
2523 bool isReMat = vrm.isReMaterialized(VReg);
2524 MachineInstr *MI = getInstructionFromIndex(index);
2525 bool CanFold = false;
2527 if (restores[i].canFold) {
2529 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2530 MachineOperand &MO = MI->getOperand(j);
2531 if (!MO.isReg() || MO.getReg() != VReg)
2535 // If this restore were to be folded, it would have been folded
2544 // Fold the load into the use if possible.
2545 bool Folded = false;
2546 if (CanFold && !Ops.empty()) {
// Non-remat case: try folding a plain stack-slot load.
2548 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Remat case: try folding the rematerialized def itself.
2550 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2552 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2553 // If the rematerializable def is a load, also try to fold it.
2554 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2555 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2556 Ops, isLoadSS, LdSlot, VReg);
2558 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2560 // Re-matting an instruction with virtual register use. Add the
2561 // register as an implicit use on the use MI and update the register
2562 // interval's spill weight to HUGE_VALF to prevent it from being
2564 LiveInterval &ImpLi = getInterval(ImpUse);
2565 ImpLi.weight = HUGE_VALF;
2566 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2571 // If folding is not possible / failed, then tell the spiller to issue a
2572 // load / rematerialization for us.
2574 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2576 vrm.addRestorePoint(VReg, MI);
2578 Id = RestoreMBBs.find_next(Id);
2581 // Finalize intervals: add kills, finalize spill weights, and filter out
2583 std::vector<LiveInterval*> RetNewLIs;
2584 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2585 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's size in instructions.
2587 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
// If no spill point already marked a kill, mark the last use as the kill
// and remember it in the VirtRegMap.
2588 if (!AddedKill.count(LI)) {
2589 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2590 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2591 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2592 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2593 assert(UseIdx != -1);
// Two-address tied uses must not be marked as kills.
2594 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2595 LastUse->getOperand(UseIdx).setIsKill();
2596 vrm.addKillPoint(LI->reg, LastUseIdx);
2599 RetNewLIs.push_back(LI);
2603 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2607 /// hasAllocatableSuperReg - Return true if the specified physical register has
2608 /// any super register that's allocatable.
2609 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the null-terminated super-register list; a super-reg counts only if
// it is both allocatable and currently tracked with an interval.
// NOTE(review): the return statements were dropped by extraction.
2610 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2611 if (allocatableRegs_[*AS] && hasInterval(*AS))
2616 /// getRepresentativeReg - Find the largest super register of the specified
2617 /// physical register.
2618 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2619 // Find the largest super-register that is allocatable.
// Default: the register represents itself if no larger candidate is found.
2620 unsigned BestReg = Reg;
2621 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2622 unsigned SuperReg = *AS;
// A super-reg with an interval but no allocatable super-reg of its own is
// the top of the chain we care about (update/break lines not visible in
// this listing).
2623 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2631 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2632 /// specified interval that conflicts with the specified physical register.
2633 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2634 unsigned PhysReg) const {
2635 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest) super-reg's
// interval, not PhysReg's own.
2636 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Count each operand of li.reg whose instruction executes while pli is live
// (increment line dropped from this listing).
2637 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2638 E = mri_->reg_end(); I != E; ++I) {
2639 MachineOperand &O = I.getOperand();
2640 MachineInstr *MI = O.getParent();
2641 MachineInstrIndex Index = getInstructionIndex(MI);
2642 if (pli.liveAt(Index))
2645 return NumConflicts;
2648 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2649 /// around all defs and uses of the specified interval. Return true if it
2650 /// was able to cut its interval.
2651 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2652 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest allocatable) super-reg so all aliases
// are covered at once.
2653 unsigned SpillReg = getRepresentativeReg(PhysReg);
2655 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2656 // If there are registers which alias PhysReg, but which are not a
2657 // sub-register of the chosen representative super register. Assert
2658 // since we can't handle it yet.
2659 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2660 tri_->isSuperRegister(*AS, SpillReg));
2663 LiveInterval &pli = getInterval(SpillReg);
// Each instruction may reference li.reg several times; process it once.
2664 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2665 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2666 E = mri_->reg_end(); I != E; ++I) {
2667 MachineOperand &O = I.getOperand();
2668 MachineInstr *MI = O.getParent();
2669 if (SeenMIs.count(MI))
2672 MachineInstrIndex Index = getInstructionIndex(MI);
// The phys-reg is live across this instruction: emergency-spill it and
// carve this instruction's slot range out of its interval.
2673 if (pli.liveAt(Index)) {
2674 vrm.addEmergencySpill(SpillReg, MI);
2675 MachineInstrIndex StartIdx = getLoadIndex(Index);
2676 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2677 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2678 pli.removeRange(StartIdx, EndIdx);
// The range cannot be cut cleanly: allocation has genuinely failed.
// Inline asm gets a friendlier diagnostic since bad constraints are the
// usual culprit.
2682 raw_string_ostream Msg(msg);
2683 Msg << "Ran out of registers during register allocation!";
2684 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2685 Msg << "\nPlease check your inline asm statement for invalid "
2686 << "constraints:\n";
2687 MI->print(Msg, tm_);
2689 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals over the same slot range.
2691 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2692 if (!hasInterval(*AS))
2694 LiveInterval &spli = getInterval(*AS);
2695 if (spli.liveAt(Index))
2696 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2703 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2704 MachineInstr* startInst) {
2705 LiveInterval& Interval = getOrCreateInterval(reg);
2706 VNInfo* VN = Interval.getNextValue(
2707 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2708 startInst, true, getVNInfoAllocator());
2709 VN->setHasPHIKill(true);
2710 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2712 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2713 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2714 Interval.addRange(LR);