//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));

static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalescing performed");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();
  phiJoinCopies.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def which we want to keep are those that are live
      // out of its block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
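
// Note on instruction numbering (a brief sketch of the scheme assumed
// throughout this file): computeNumbering() below assigns each instruction a
// MachineInstrIndex, and each index carries a slot (LOAD, USE, DEF or STORE;
// see the getLoadIndex / getUseIndex / getDefIndex / getStoreIndex accessors
// used later in this file). Extra empty indices are left at block starts,
// after each instruction, and before terminators; the latter are recorded in
// terminatorGaps and are where PHI kills are pointed.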
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();
  phiJoinCopies.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));

  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = getNextIndex(MIIndex);
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex = getNextIndex(MIIndex);
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex = getNextIndex(MIIndex);

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = getNextIndex(MIIndex);
        i2miMap_.push_back(0);
      }
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex = getNextIndex(MIIndex);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] =
      std::make_pair(StartIdx, getPrevSlot(MIIndex));
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
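
  // Note: Idx2MBBMap must stay sorted by start index; the remapping code
  // below and findLiveInMBBs/findReachableMBBs do std::lower_bound lookups
  // over it.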
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (getPrevSlot(LI->end)).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getNextSlot(getMBBEndIdx(I->second));
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = getNextIndex(
              MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                (idx == index ? offset : MachineInstrIndex::LOAD)));
          else
            LI->end =
              MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIIndex()) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
                                 vni->kills[i]);
              --I;
              vni->kills[i] = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                                                offset);
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}
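
// A note on scaleNumbering: spreading the existing indices apart by a
// constant factor opens up unused indices between instructions, presumably so
// that new instructions (e.g. spill code) can be given numbers without
// renumbering the whole function.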
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  // * scale MBB begin and end points
  // * scale all ranges.
  // * Update VNI structures.
  // * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices =
      MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.clear();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();
  performEarlyCoalescing();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = getNextIndex(index);
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = getNextIndex(index);
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
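  // The three cases handled below: the first definition of the vreg, a
  // redefinition created by two-address elimination (the def is tied to a
  // use), and a redefinition created by PHI elimination.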
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
      else if (MO.isEarlyClobber())
        // Earlyclobbers that die in this instruction move up one extra, to
        // compensate for having the starting point moved back one. This
        // gets them to overlap the live range of other outputs.
        killIdx = getNextSlot(getNextSlot(defIndex));
      else
        killIdx = getNextSlot(defIndex);

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getNextSlot(getUseIndex(getInstructionIndex(Kill)));
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(
          LiveRange(RedefIndex, MO.isEarlyClobber() ?
                                getNextSlot(getNextSlot(RedefIndex)) :
                                getNextSlot(RedefIndex), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        phiJoinCopies.push_back(Killer);
        MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
        MachineInstrIndex End =
          getNextSlot(getUseIndex(getInstructionIndex(Killer)));
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      MachineInstrIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DEBUG(errs() << " dead");
    if (MO.isEarlyClobber())
      end = getNextSlot(getNextSlot(start));
    else
      end = getNextSlot(start);
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = getNextIndex(baseIndex);
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = getNextIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = getNextSlot(start);
        }
        goto exit;
      }
    }

    baseIndex = getNextIndex(baseIndex);
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = getNextSlot(start);

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
      SeenDefUse = true;
      break;
    }

    baseIndex = getNextIndex(baseIndex);
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(MIIdx));
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
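
// Heuristic used by performEarlyCoalescing() below: copies feeding a PHI
// join are split into "identity" copies (their source is the value defined
// by the join itself) and all others. If nothing conflicts, the regular
// coalescer is left to do the job; otherwise the join is only coalesced
// early when the identity copies outnumber the others.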
bool
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt,
                                      LiveInterval &SrcInt,
                                      SmallVector<MachineInstr*,16> &IdentCopies,
                                      SmallVector<MachineInstr*,16> &OtherCopies) {
  bool HaveConflict = false;
  unsigned NumIdent = 0;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
         re = mri_->reg_end(); ri != re; ++ri) {
    MachineOperand &O = ri.getOperand();
    if (!O.isUse())
      continue;

    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      continue;
    if (SrcReg != DstInt.reg) {
      OtherCopies.push_back(MI);
      HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  if (!HaveConflict)
    return false; // Let coalescer handle it
  return IdentCopies.size() > OtherCopies.size();
}
void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg,
                                    DstSubReg);
    assert(isMove && "PHI join instruction must be a move!");
    isMove = isMove; // Avoid compiler warning if assertions turned off.

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    VNInfo *VNI = DstInt.getValNumInfo(0);

    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      DEBUG(errs() << "Moving: " << *PHICopy);

      MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
      MachineInstrIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      MachineInstrIndex StartIndex = SLR->start;
      MachineInstrIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << " added range [" << StartIndex << ','
                   << EndIndex << "] to reg" << DstInt.reg << '\n');
      if (DstInt.liveAt(StartIndex))
        DstInt.removeRange(StartIndex, EndIndex);
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
                                           VNInfoAllocator);
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
          continue;
        MO.setReg(PHIDst);
      }
    }

    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
      MachineInstrIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      MachineInstrIndex StartIndex = SLR->start;
      MachineInstrIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << " added range [" << StartIndex << ','
                   << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    }

    // Remove the phi join and update the phi block liveness.
    MachineInstrIndex MIIndex = getInstructionIndex(Join);
    MachineInstrIndex UseIndex = getUseIndex(MIIndex);
    MachineInstrIndex DefIndex = getDefIndex(MIIndex);
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
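///
/// For example (indices are illustrative only): if a virtual register is
/// defined by the instruction numbered 8 and last read by the instruction
/// numbered 20, it gets a live interval such as [8,21); the def and use
/// slots of the endpoints are refined via getDefIndex / getUseIndex below.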
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--)
        MIIndex = getNextIndex(MIIndex);

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
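
// Note: physical register intervals are created below with HUGE_VALF spill
// weight, which effectively marks them unspillable; virtual register
// intervals start out with a weight of zero.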
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // heuristics.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (Reg != li.reg) {
        if (ImpUse == 0)
          ImpUse = Reg;
        else if (Reg != ImpUse)
          return false;
      }
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);

    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
1685 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1686 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1687 bool LiveIntervals::
1688 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1689 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1691 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1692 unsigned Slot, int LdSlot,
1693 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1695 const TargetRegisterClass* rc,
1696 SmallVector<int, 4> &ReMatIds,
1697 const MachineLoopInfo *loopInfo,
1698 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1699 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1700 std::vector<LiveInterval*> &NewLIs) {
1701 bool CanFold = false;
1703 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1704 MachineOperand& mop = MI->getOperand(i);
1707 unsigned Reg = mop.getReg();
1708 unsigned RegI = Reg;
1709 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1714 bool TryFold = !DefIsReMat;
1715 bool FoldSS = true; // Default behavior unless it's a remat.
1716 int FoldSlot = Slot;
1718 // If this is the rematerializable definition MI itself and
1719 // all of its uses are rematerialized, simply delete it.
1720 if (MI == ReMatOrigDefMI && CanDelete) {
1721 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1723 RemoveMachineInstrFromMaps(MI);
1724 vrm.RemoveMachineInstrFromMaps(MI);
1725 MI->eraseFromParent();
1729 // If def for this use can't be rematerialized, then try folding.
1730 // If def is rematerializable and it's a load, also try folding.
1731 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1733 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1739 // Scan all of the operands of this instruction rewriting operands
1740 // to use NewVReg instead of li.reg as appropriate. We do this for
1743 // 1. If the instr reads the same spilled vreg multiple times, we
1744 // want to reuse the NewVReg.
1745 // 2. If the instr is a two-addr instruction, we are required to
1746 // keep the src/dst regs pinned.
1748 // Keep track of whether we replace a use and/or def so that we can
1749 // create the spill interval with the appropriate range.
1751 HasUse = mop.isUse();
1752 HasDef = mop.isDef();
1753 SmallVector<unsigned, 2> Ops;
1755 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1756 const MachineOperand &MOj = MI->getOperand(j);
1759 unsigned RegJ = MOj.getReg();
1760 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1764 if (!MOj.isUndef()) {
1765 HasUse |= MOj.isUse();
1766 HasDef |= MOj.isDef();
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
1812 mop.setReg(NewVReg);
1813 if (mop.isImplicit())
1814 rewriteImplicitOps(li, MI, NewVReg, vrm);
1816 // Reuse NewVReg for other reads.
1817 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1818 MachineOperand &mopj = MI->getOperand(Ops[j]);
1819 mopj.setReg(NewVReg);
1820 if (mopj.isImplicit())
1821 rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1844 // If this interval hasn't been assigned a stack slot (because earlier
1845 // def is a deleted remat def), do it now.
1846 assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }
1850 // Re-matting an instruction with virtual register use. Add the
1851 // register as an implicit use on the use MI.
1852 if (DefIsReMat && ImpUse)
1853 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1855 // Create a new register interval for this spill / remat.
1856 LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        MachineInstrIndex End = getNextSlot(getUseIndex(index));
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(MachineInstrIndex(), 0, false,
                                   getVNInfoAllocator()));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }
    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         MachineInstrIndex Idx) const {
  MachineInstrIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIIndex())
      continue;

    MachineInstrIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
1912 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1913 /// during spilling.
namespace {
  struct RewriteInfo {
    MachineInstrIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
} // end anonymous namespace
1931 void LiveIntervals::
1932 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1933 LiveInterval::Ranges::const_iterator &I,
1934 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1935 unsigned Slot, int LdSlot,
1936 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1938 const TargetRegisterClass* rc,
1939 SmallVector<int, 4> &ReMatIds,
1940 const MachineLoopInfo *loopInfo,
1941 BitVector &SpillMBBs,
1942 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1943 BitVector &RestoreMBBs,
1944 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1945 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1946 std::vector<LiveInterval*> &NewLIs) {
1947 bool AllCanFold = true;
1948 unsigned NewVReg = 0;
1949 MachineInstrIndex start = getBaseIndex(I->start);
1950 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
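  // Both bounds are snapped to whole-instruction boundaries here, so the
  // plain instruction indices gathered below can be compared against them
  // directly.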
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
1954 std::vector<RewriteInfo> RewriteMIs;
1955 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1956 re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    MachineInstrIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you could
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;

    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1978 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
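  // Entries for the same MI share an Index, so after the sort they end up
  // adjacent (std::sort groups equal keys even though it isn't stable);
  // the rewrite loop below relies on that grouping.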
1980 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1981 // Now rewrite the defs and uses.
1982 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1983 RewriteInfo &rwi = RewriteMIs[i];
1985 MachineInstrIndex index = rwi.Index;
1986 bool MIHasUse = rwi.HasUse;
1987 bool MIHasDef = rwi.HasDef;
1988 MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
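    // e.g. (hypothetical) "%reg1026 = ADD %reg1024, %reg1024" contributes two
    // use entries at the same index; the loop above collapses them into a
    // single rewrite with NumUses == 2.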
2000 MachineBasicBlock *MBB = MI->getParent();
2002 if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
2006 LiveInterval &ImpLi = getInterval(ImpUse);
2007 ImpLi.weight = HUGE_VALF;
2010 unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2014 if (NVI != MBBVRegsMap.end()) {
2015 ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }
2031 bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its defs / uses
    // can be folded, give it a low spill weight.
2035 if (NewVReg && TrySplit && AllCanFold) {
2036 LiveInterval &nI = getOrCreateInterval(NewVReg);
2043 bool HasDef = false;
2044 bool HasUse = false;
2045 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2046 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2047 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2048 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2049 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
2093 } else if (SII != SpillIdxes.end() &&
2094 SII->second.back().vreg == NewVReg &&
2095 index > SII->second.back().index) {
2096 // There is an earlier def that's not killed (must be two-address).
2097 // The spill is no longer needed.
2098 SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
2108 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2109 SpillIdxes.find(MBBId);
2110 if (SII != SpillIdxes.end() &&
2111 SII->second.back().vreg == NewVReg &&
2112 index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
2114 SII->second.back().canFold = false;
2115 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2116 RestoreIdxes.find(MBBId);
2117 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2118 // If we are splitting live intervals, only fold if it's the first
2119 // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }
2134 // Update spill weight.
2135 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2136 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
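    // Assuming the usual powf-based helper in this codebase, getSpillWeight
    // is roughly (HasDef + HasUse) * 10^loopDepth, so defs and uses inside
    // deep loops dominate the resulting spill weight.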
2139 if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
2141 LiveInterval &nI = getOrCreateInterval(NewVReg);
2146 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2147 unsigned vr, BitVector &RestoreMBBs,
2148 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
2160 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2161 unsigned vr, BitVector &RestoreMBBs,
2162 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = MachineInstrIndex();
}
2171 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2172 /// spilled and create empty intervals for their uses.
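/// ("Empty" in the sense that the interval has no live ranges at all; the
/// allocator may then assign it any free register, since the value is
/// undefined anyway.)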
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2175 const TargetRegisterClass* rc,
2176 std::vector<LiveInterval*> &NewLIs) {
2177 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2178 re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
2183 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2184 "Register def was not rewritten?");
2185 RemoveMachineInstrFromMaps(MI);
2186 vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def, so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
2195 NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
2207 std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2213 std::vector<LiveInterval*> added;
2215 assert(li.weight != HUGE_VALF &&
2216 "attempt to spill already spilled interval!");
  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });
2224 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2226 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2227 while (RI != mri_->reg_end()) {
2228 MachineInstr* MI = &*RI;
2230 SmallVector<unsigned, 2> Indices;
2231 bool HasUse = false;
2232 bool HasDef = false;
2234 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2235 MachineOperand& mop = MI->getOperand(i);
2236 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2238 HasUse |= MI->getOperand(i).isUse();
2239 HasDef |= MI->getOperand(i).isDef();
      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2245 Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);
2250 // create a new register for this spill
2251 LiveInterval &nI = getOrCreateInterval(NewVReg);
2253 // the spill weight is now infinity as it
2254 // cannot be spilled again
2255 nI.weight = HUGE_VALF;
2257 // Rewrite register operands to use the new vreg.
2258 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2259 E = Indices.end(); I != E; ++I) {
2260 MI->getOperand(*I).setReg(NewVReg);
2262 if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      MachineInstrIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }
      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.print(errs(), tri_);
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
2301 std::vector<LiveInterval*> LiveIntervals::
2302 addIntervalsForSpills(const LiveInterval &li,
2303 SmallVectorImpl<LiveInterval*> &SpillIs,
2304 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2306 if (EnableFastSpilling)
2307 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2309 assert(li.weight != HUGE_VALF &&
2310 "attempt to spill already spilled interval!");
  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });
  // Each bit specifies whether a spill is required in the corresponding MBB.
2319 BitVector SpillMBBs(mf_->getNumBlockIDs());
2320 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2321 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2322 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2323 DenseMap<unsigned,unsigned> MBBVRegsMap;
2324 std::vector<LiveInterval*> NewLIs;
2325 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2327 unsigned NumValNums = li.getNumValNums();
2328 SmallVector<MachineInstr*, 4> ReMatDefs;
2329 ReMatDefs.resize(NumValNums, NULL);
2330 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2331 ReMatOrigDefs.resize(NumValNums, NULL);
2332 SmallVector<int, 4> ReMatIds;
2333 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2334 BitVector ReMatDelete(NumValNums);
2335 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
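  // Note that ReMatDefs, ReMatOrigDefs, ReMatIds and ReMatDelete are all
  // indexed by val# id: rematerialization is decided per value number,
  // since each val# may have a different (or no) rematerializable def.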
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
2339 if (vrm.getPreSplitReg(li.reg)) {
2340 vrm.setIsSplitFromReg(li.reg, 0);
2341 // Unset the split kill marker on the last use.
2342 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2343 if (KillIdx != MachineInstrIndex()) {
2344 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2345 assert(KillMI && "Last use disappeared?");
2346 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2347 assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
2351 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2352 Slot = vrm.getStackSlot(li.reg);
2353 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2354 MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2358 bool isLoad = isLoadSS ||
2359 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2360 bool IsFirstRange = true;
2361 for (LiveInterval::Ranges::const_iterator
2362 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2363 // If this is a split live interval with multiple ranges, it means there
2364 // are two-address instructions that re-defined the value. Only the
2365 // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
2390 bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
2394 unsigned VN = VNI->id;
2395 if (VNI->isUnused())
2396 continue; // Dead val#.
2397 // Is the def for the val# rematerializable?
2398 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2402 // Remember how to remat the def of this val#.
2403 ReMatOrigDefs[VN] = ReMatDefMI;
2404 // Original def may be modified so we have to make a copy here.
2405 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2406 CloneMIs.push_back(Clone);
2407 ReMatDefs[VN] = Clone;
2409 bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      } else {
        ReMatDelete.set(VN);
      }
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
2427 // One stack slot per live interval.
2428 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2429 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else {
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
    }
  }
2438 // Create new intervals and rewrite defs and uses.
2439 for (LiveInterval::Ranges::const_iterator
2440 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2441 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2442 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2443 bool DefIsReMat = ReMatDefMI != NULL;
2444 bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2447 bool isLoad = isLoadSS ||
2448 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2449 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2450 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2451 CanDelete, vrm, rc, ReMatIds, loopInfo,
2452 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2453 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
2462 SmallPtrSet<LiveInterval*, 4> AddedKill;
2463 SmallVector<unsigned, 2> Ops;
2464 if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
2468 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2469 MachineInstrIndex index = spills[i].index;
2470 unsigned VReg = spills[i].vreg;
2471 LiveInterval &nI = getOrCreateInterval(VReg);
2472 bool isReMat = vrm.isReMaterialized(VReg);
2473 MachineInstr *MI = getInstructionFromIndex(index);
2474 bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;
            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (MI->isRegTiedToDefOperand(j) ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2531 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      MachineInstrIndex index = restores[i].index;
      if (index == MachineInstrIndex())
        continue;
      unsigned VReg = restores[i].vreg;
2536 LiveInterval &nI = getOrCreateInterval(VReg);
2537 bool isReMat = vrm.isReMaterialized(VReg);
2538 MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;
          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
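      // (The division normalizes the accumulated use weight by the interval's
      // approximate length, yielding a use-density metric: long but sparsely
      // used intervals become comparatively cheap to spill.)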
2602 if (!AddedKill.count(LI)) {
2603 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2604 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2605 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2606 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2607 assert(UseIdx != -1);
2608 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2609 LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
2621 /// hasAllocatableSuperReg - Return true if the specified physical register has
2622 /// any super register that's allocatable.
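/// For instance, on X86 the super-registers of AL include AX and EAX; AL has
/// an allocatable super register whenever one of those is allocatable and
/// has a live interval.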
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
2630 /// getRepresentativeReg - Find the largest super register of the specified
2631 /// physical register.
2632 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2633 // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
2645 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2646 /// specified interval that conflicts with the specified physical register.
2647 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2648 unsigned PhysReg) const {
2649 unsigned NumConflicts = 0;
2650 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2651 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2652 E = mri_->reg_end(); I != E; ++I) {
2653 MachineOperand &O = I.getOperand();
2654 MachineInstr *MI = O.getParent();
2655 MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
2662 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2663 /// around all defs and uses of the specified interval. Return true if it
2664 /// was able to cut its interval.
2665 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2666 unsigned PhysReg, VirtRegMap &vrm) {
2667 unsigned SpillReg = getRepresentativeReg(PhysReg);
  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
2678 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2679 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2680 E = mri_->reg_end(); I != E; ++I) {
2681 MachineOperand &O = I.getOperand();
2682 MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    MachineInstrIndex Index = getInstructionIndex(MI);
2687 if (pli.liveAt(Index)) {
2688 vrm.addEmergencySpill(SpillReg, MI);
2689 MachineInstrIndex StartIdx = getLoadIndex(Index);
2690 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2691 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index),
                           getNextSlot(getStoreIndex(Index)));
      }
    }
  }

  return Cut;
}
2717 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2718 MachineInstr* startInst) {
2719 LiveInterval& Interval = getOrCreateInterval(reg);
2720 VNInfo* VN = Interval.getNextValue(
2721 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2722 startInst, true, getVNInfoAllocator());
2723 VN->setHasPHIKill(true);
  VN->kills.push_back(terminatorGaps[startInst->getParent()]);
  LiveRange LR(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
  Interval.addRange(LR);

  return LR;
}
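// Note: the range built above runs from startInst's def slot to just past
// the end of its block, and the value is marked killed at the block's
// terminator gap; i.e. it stays live out to the end of the block.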