1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/PseudoSourceValue.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// NOTE(review): this numbered listing omits some original lines; code kept verbatim.
// Debug switch: turn off rematerialization entirely.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Debug switch: opt into the more aggressive rematerialization heuristic.
53 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// Debug switch: use the fast spilling path instead of the default one.
55 static cl::opt<bool> EnableFastSpilling("fast-spill",
56 cl::init(false), cl::Hidden);
// Enable the early-coalescing phase (see performEarlyCoalescing); off by default.
58 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
// Cap on early coalescings; -1 presumably means "no limit" — confirm in
// performEarlyCoalescing (not visible in this chunk).
60 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
61 cl::init(-1), cl::Hidden);
// Pass statistics (printed with -stats).
63 STATISTIC(numIntervals , "Number of original intervals");
64 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
65 STATISTIC(numSplits , "Number of intervals split");
66 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Pass identity and registration under the "liveintervals" command-line name.
68 char LiveIntervals::ID = 0;
69 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare the analyses this pass requires and the ones it keeps valid.
// NOTE(review): this numbered listing omits some original lines (e.g. the
// closing brace at original line 87); code kept verbatim.
71 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
73 AU.addRequired<AliasAnalysis>();
74 AU.addPreserved<AliasAnalysis>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<LiveVariables>();
77 AU.addPreservedID(MachineLoopInfoID);
78 AU.addPreservedID(MachineDominatorsID);
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 MachineFunctionPass::getAnalysisUsage(AU);
// Drop all per-function state so the pass can be rerun on another function.
// NOTE(review): this numbered listing omits some original lines (93-99, 107,
// 109-111); code kept verbatim.
89 void LiveIntervals::releaseMemory() {
90 // Free the live intervals themselves.
91 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
92 E = r2iMap_.end(); I != E; ++I)
100 terminatorGaps.clear();
101 phiJoinCopies.clear();
103 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
104 VNInfoAllocator.Reset();
// Delete any cloned instructions still owned by the pass.
105 while (!CloneMIs.empty()) {
106 MachineInstr *MI = CloneMIs.back();
108 mf_->DeleteMachineInstr(MI);
// Return whether the use of Reg at operand OpIdx of MI is a pure copy-like
// use (register move, SUBREG_TO_REG source, or EXTRACT_SUBREG source) that
// can be replaced by an IMPLICIT_DEF of the destination.
// NOTE(review): the return statements / closing brace are elided in this
// numbered listing; code kept verbatim.
112 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
113 unsigned OpIdx, const TargetInstrInfo *tii_){
114 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
115 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
// SUBREG_TO_REG takes its copied value in operand 2.
119 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// EXTRACT_SUBREG takes its source register in operand 1.
121 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
126 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
127 /// there is one implicit_def for each use. Add isUndef marker to
128 /// implicit_def defs and their uses.
// NOTE(review): this numbered listing omits many original lines; code kept
// verbatim. Walks blocks in DFS order, tracking which registers currently
// hold only an IMPLICIT_DEF value (ImpDefRegs) and the defining
// instructions themselves (ImpDefMIs).
129 void LiveIntervals::processImplicitDefs() {
130 SmallSet<unsigned, 8> ImpDefRegs;
131 SmallVector<MachineInstr*, 8> ImpDefMIs;
132 MachineBasicBlock *Entry = mf_->begin();
133 SmallPtrSet<MachineBasicBlock*,16> Visited;
134 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
135 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
137 MachineBasicBlock *MBB = *DFI;
138 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
140 MachineInstr *MI = &*I;
// Record this implicit def; for a physreg also record its sub-registers,
// since defining a register implicitly defines all of its sub-regs.
142 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
143 unsigned Reg = MI->getOperand(0).getReg();
144 ImpDefRegs.insert(Reg);
145 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
146 for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
147 ImpDefRegs.insert(*SS);
149 ImpDefMIs.push_back(MI);
153 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
154 MachineOperand &MO = MI->getOperand(2);
155 if (ImpDefRegs.count(MO.getReg())) {
156 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
157 // This is an identity copy, eliminate it now.
159 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
162 MI->eraseFromParent();
// Scan this instruction's uses of implicit_def'd registers.
167 bool ChangedToImpDef = false;
168 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
169 MachineOperand& MO = MI->getOperand(i);
170 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
172 unsigned Reg = MO.getReg();
175 if (!ImpDefRegs.count(Reg))
177 // Use is a copy, just turn it into an implicit_def.
178 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
179 bool isKill = MO.isKill();
180 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip all operands except the destination (operand 0).
181 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
182 MI->RemoveOperand(j);
184 ImpDefRegs.erase(Reg);
185 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
188 ChangedToImpDef = true;
// NOTE(review): comment on original line 194 appears truncated in this
// listing.
193 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
194 // Make sure other uses of
195 for (unsigned j = i+1; j != e; ++j) {
196 MachineOperand &MOJ = MI->getOperand(j);
197 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
200 ImpDefRegs.erase(Reg);
204 if (ChangedToImpDef) {
205 // Backtrack to process this new implicit_def.
// An ordinary (non-implicit-def) def of a register ends its
// implicit-def status.
208 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
209 MachineOperand& MO = MI->getOperand(i);
210 if (!MO.isReg() || !MO.isDef())
212 ImpDefRegs.erase(MO.getReg());
217 // Any outstanding liveout implicit_def's?
218 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
219 MachineInstr *MI = ImpDefMIs[i];
220 unsigned Reg = MI->getOperand(0).getReg();
221 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
222 !ImpDefRegs.count(Reg)) {
223 // Delete all "local" implicit_def's. That include those which define
224 // physical registers since they cannot be liveout.
225 MI->eraseFromParent();
229 // If there are multiple defs of the same register and at least one
230 // is not an implicit_def, do not insert implicit_def's before the
233 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
234 DE = mri_->def_end(); DI != DE; ++DI) {
235 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
243 // The only implicit_def which we want to keep are those that are live
245 MI->eraseFromParent();
// Rewrite remaining uses of the deleted implicit_def's register.
247 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
248 UE = mri_->use_end(); UI != UE; ) {
249 MachineOperand &RMO = UI.getOperand();
250 MachineInstr *RMI = &*UI;
252 MachineBasicBlock *RMBB = RMI->getParent();
256 // Turn a copy use into an implicit_def.
257 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
258 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
260 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
261 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
262 RMI->RemoveOperand(j);
// Otherwise give the use its own fresh virtual register.
266 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
267 unsigned NewVReg = mri_->createVirtualRegister(RC);
// Assign a fresh dense numbering to every MachineInstr/MachineBasicBlock and
// remap any previously computed live ranges and value numbers onto the new
// index space. NOTE(review): this numbered listing omits many original
// lines; code kept verbatim.
279 void LiveIntervals::computeNumbering() {
// Snapshot the old index->instruction and index->MBB maps so existing
// intervals can be translated after renumbering.
280 Index2MiMap OldI2MI = i2miMap_;
281 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
287 terminatorGaps.clear();
288 phiJoinCopies.clear();
292 // Number MachineInstrs and MachineBasicBlocks.
293 // Initialize MBB indexes to a sentinel.
294 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
295 std::make_pair(LiveIndex(),LiveIndex()));
298 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
300 LiveIndex StartIdx = MIIndex;
302 // Insert an empty slot at the beginning of each block.
303 MIIndex = getNextIndex(MIIndex);
304 i2miMap_.push_back(0);
306 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
309 if (I == MBB->getFirstTerminator()) {
310 // Leave a gap for before terminators, this is where we will point
312 LiveIndex tGap(true, MIIndex);
314 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
316 "Multiple 'first' terminators encountered during numbering.");
317 inserted = inserted; // Avoid compiler warning if assertions turned off.
318 i2miMap_.push_back(0);
320 MIIndex = getNextIndex(MIIndex);
323 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
324 assert(inserted && "multiple MachineInstr -> index mappings");
326 i2miMap_.push_back(I);
327 MIIndex = getNextIndex(MIIndex);
330 // Insert max(1, numdefs) empty slots after every instruction.
331 unsigned Slots = I->getDesc().getNumDefs();
335 MIIndex = getNextIndex(MIIndex);
336 i2miMap_.push_back(0);
// Blocks with no terminator still need a terminator gap at the end.
341 if (MBB->getFirstTerminator() == MBB->end()) {
342 // Leave a gap for before terminators, this is where we will point
344 LiveIndex tGap(true, MIIndex);
346 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
348 "Multiple 'first' terminators encountered during numbering.");
349 inserted = inserted; // Avoid compiler warning if assertions turned off.
350 i2miMap_.push_back(0);
352 MIIndex = getNextIndex(MIIndex);
355 // Set the MBB2IdxMap entry for this MBB.
356 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
357 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
360 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Second phase: translate pre-existing ranges/VNInfos to the new numbering.
362 if (!OldI2MI.empty())
363 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
364 for (LiveInterval::iterator LI = OI->second->begin(),
365 LE = OI->second->end(); LI != LE; ++LI) {
367 // Remap the start index of the live range to the corresponding new
368 // number, or our best guess at what it _should_ correspond to if the
369 // original instruction has been erased. This is either the following
370 // instruction or its predecessor.
371 unsigned index = LI->start.getVecIndex();
372 LiveIndex::Slot offset = LI->start.getSlot();
373 if (LI->start.isLoad()) {
374 std::vector<IdxMBBPair>::const_iterator I =
375 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
376 // Take the pair containing the index
377 std::vector<IdxMBBPair>::const_iterator J =
378 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
380 LI->start = getMBBStartIdx(J->second);
382 LI->start = LiveIndex(
383 LiveIndex(mi2iMap_[OldI2MI[index]]),
384 (LiveIndex::Slot)offset);
387 // Remap the ending index in the same way that we remapped the start,
388 // except for the final step where we always map to the immediately
389 // following instruction.
390 index = (getPrevSlot(LI->end)).getVecIndex();
391 offset = LI->end.getSlot();
392 if (LI->end.isLoad()) {
393 // VReg dies at end of block.
394 std::vector<IdxMBBPair>::const_iterator I =
395 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
398 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Skip over indices whose instructions have been erased.
400 unsigned idx = index;
401 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
403 if (index != OldI2MI.size())
405 LiveIndex(mi2iMap_[OldI2MI[index]],
406 (idx == index ? offset : LiveIndex::LOAD));
409 LiveIndex(LiveIndex::NUM * i2miMap_.size());
413 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
414 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
417 // Remap the VNInfo def index, which works the same as the
418 // start indices above. VN's with special sentinel defs
419 // don't need to be remapped.
420 if (vni->isDefAccurate() && !vni->isUnused()) {
421 unsigned index = vni->def.getVecIndex();
422 LiveIndex::Slot offset = vni->def.getSlot();
423 if (vni->def.isLoad()) {
424 std::vector<IdxMBBPair>::const_iterator I =
425 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
426 // Take the pair containing the index
427 std::vector<IdxMBBPair>::const_iterator J =
428 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
430 vni->def = getMBBStartIdx(J->second);
432 vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
436 // Remap the VNInfo kill indices, which works the same as
437 // the end indices above.
438 for (size_t i = 0; i < vni->kills.size(); ++i) {
439 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
440 LiveIndex::Slot offset = vni->kills[i].getSlot();
// NOTE(review): assert on a string literal is always true — this was
// presumably meant to be assert(false && "Value killed at a load slot.").
442 if (vni->kills[i].isLoad()) {
443 assert("Value killed at a load slot.");
444 /*std::vector<IdxMBBPair>::const_iterator I =
445 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
448 vni->kills[i] = getMBBEndIdx(I->second);*/
// Kills at PHI indices are redirected to the block's terminator gap.
450 if (vni->kills[i].isPHIIndex()) {
451 std::vector<IdxMBBPair>::const_iterator I =
452 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
454 vni->kills[i] = terminatorGaps[I->second];
456 assert(OldI2MI[index] != 0 &&
457 "Kill refers to instruction not present in index maps.");
458 vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
462 unsigned idx = index;
463 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
465 if (index != OldI2MI.size())
466 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
467 (idx == index ? offset : 0);
469 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// Multiply every live index in the analysis by `factor`, updating block
// bounds, terminator gaps, intervals, and both instruction<->index maps.
// NOTE(review): this numbered listing omits some original lines; code kept
// verbatim.
477 void LiveIntervals::scaleNumbering(int factor) {
479 // * scale MBB begin and end points
480 // * scale all ranges.
481 // * Update VNI structures.
482 // * Scale instruction numberings
484 // Scale the MBB indices.
486 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
487 MBB != MBBE; ++MBB) {
488 std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
489 mbbIndices.first = mbbIndices.first.scale(factor);
490 mbbIndices.second = mbbIndices.second.scale(factor);
491 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
493 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
495 // Scale terminator gaps.
496 for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
497 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
499 terminatorGaps[TGI->first] = TGI->second.scale(factor);
502 // Scale the intervals.
503 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
504 LI->second->scaleNumbering(factor);
507 // Scale MachineInstrs.
// Rebuild mi2iMap_ from a snapshot, tracking the highest slot so the
// index->instruction vector can be resized to fit.
508 Mi2IndexMap oldmi2iMap = mi2iMap_;
509 LiveIndex highestSlot;
510 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
512 LiveIndex newSlot = MI->second.scale(factor);
513 mi2iMap_[MI->first] = newSlot;
514 highestSlot = std::max(highestSlot, newSlot);
517 unsigned highestVIndex = highestSlot.getVecIndex();
519 i2miMap_.resize(highestVIndex + 1);
520 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
522 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
528 /// runOnMachineFunction - Register allocate the whole function
// Entry point: cache target/analysis handles, normalize IMPLICIT_DEFs, then
// (per the elided lines) number instructions and build the intervals.
// NOTE(review): this numbered listing omits some original lines; code kept
// verbatim.
530 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
532 mri_ = &mf_->getRegInfo();
533 tm_ = &fn.getTarget();
534 tri_ = tm_->getRegisterInfo();
535 tii_ = tm_->getInstrInfo();
536 aa_ = &getAnalysis<AliasAnalysis>();
537 lv_ = &getAnalysis<LiveVariables>();
538 allocatableRegs_ = tri_->getAllocatableSet(fn);
540 processImplicitDefs();
543 performEarlyCoalescing();
545 numIntervals += getNumIntervals();
551 /// print - Implement the dump method.
// Prints every interval (register -> live ranges) to the given stream.
552 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
553 OS << "********** INTERVALS **********\n";
554 for (const_iterator I = begin(), E = end(); I != E; ++I) {
555 I->second->print(OS, tri_);
// Dump every machine instruction in the function, prefixed with its live
// index, grouped by basic block.
562 void LiveIntervals::printInstrs(raw_ostream &OS) const {
563 OS << "********** MACHINEINSTRS **********\n";
565 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
566 mbbi != mbbe; ++mbbi) {
567 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
568 for (MachineBasicBlock::iterator mii = mbbi->begin(),
569 mie = mbbi->end(); mii != mie; ++mii) {
570 OS << getInstructionIndex(mii) << '\t' << *mii;
// Debug convenience wrapper; body elided in this numbered listing
// (presumably forwards to printInstrs — confirm against full source).
575 void LiveIntervals::dumpInstrs() const {
579 /// conflictsWithPhysRegDef - Returns true if the specified register
580 /// is defined during the duration of the specified interval.
// NOTE(review): this numbered listing omits some original lines; code kept
// verbatim. Walks every (non-deleted) instruction inside li's ranges and
// checks its register operands (after mapping vregs through the VirtRegMap)
// against `reg`.
581 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
582 VirtRegMap &vrm, unsigned reg) {
583 for (LiveInterval::Ranges::const_iterator
584 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
585 for (LiveIndex index = getBaseIndex(I->start),
586 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
587 index = getNextIndex(index)) {
588 // skip deleted instructions
589 while (index != end && !getInstructionFromIndex(index))
590 index = getNextIndex(index);
591 if (index == end) break;
593 MachineInstr *MI = getInstructionFromIndex(index);
// Moves involving li.reg itself are not conflicts.
594 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
595 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
596 if (SrcReg == li.reg || DstReg == li.reg)
598 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
599 MachineOperand& mop = MI->getOperand(i);
602 unsigned PhysReg = mop.getReg();
603 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers to their assigned physreg, if any.
605 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
606 if (!vrm.hasPhys(PhysReg))
608 PhysReg = vrm.getPhys(PhysReg);
610 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
619 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
620 /// it can check use as well.
// NOTE(review): this numbered listing omits some original lines; code kept
// verbatim. Instructions in JoinedCopies are ignored; only physical
// register operands are considered here.
621 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
622 unsigned Reg, bool CheckUse,
623 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
624 for (LiveInterval::Ranges::const_iterator
625 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
626 for (LiveIndex index = getBaseIndex(I->start),
627 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
628 index = getNextIndex(index)) {
629 // Skip deleted instructions.
630 MachineInstr *MI = 0;
631 while (index != end) {
632 MI = getInstructionFromIndex(index);
635 index = getNextIndex(index);
637 if (index == end) break;
639 if (JoinedCopies.count(MI))
641 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
642 MachineOperand& MO = MI->getOperand(i);
// Uses only count as conflicts when the caller asked for them.
645 if (MO.isUse() && !CheckUse)
647 unsigned PhysReg = MO.getReg();
648 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
650 if (tri_->isSubRegister(Reg, PhysReg))
// Debug helper: print a physreg by target name, a vreg as "%reg<N>".
// NOTE(review): the `else` / closing brace lines are elided in this
// numbered listing; code kept verbatim.
660 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
661 if (TargetRegisterInfo::isPhysicalRegister(reg))
662 errs() << tri_->getName(reg);
664 errs() << "%reg" << reg;
// Extend `interval` for a definition of a virtual register at MIIdx. Three
// cases: first def of the vreg, a two-address redefinition, and a
// phi-elimination redefinition. NOTE(review): this numbered listing omits
// many original lines; code kept verbatim.
668 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
669 MachineBasicBlock::iterator mi,
673 LiveInterval &interval) {
675 errs() << "\t\tregister: ";
676 printRegName(interval.reg, tri_);
679 // Virtual registers may be defined multiple times (due to phi
680 // elimination and 2-addr elimination). Much of what we do only has to be
681 // done once for the vreg. We use an empty interval to detect the first
682 // time we see a vreg.
683 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
684 if (interval.empty()) {
685 // Get the Idx of the defining instructions.
686 LiveIndex defIndex = getDefIndex(MIIdx);
687 // Earlyclobbers move back one, so that they overlap the live range
689 if (MO.isEarlyClobber())
690 defIndex = getUseIndex(MIIdx);
// Remember the defining instruction when it is copy-like, so coalescing
// can find the copy later.
692 MachineInstr *CopyMI = NULL;
693 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
694 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
695 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
696 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
697 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
699 // Earlyclobbers move back one.
700 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
702 assert(ValNo->id == 0 && "First value in interval is not 0?");
704 // Loop over all of the blocks that the vreg is defined in. There are
705 // two cases we have to handle here. The most common case is a vreg
706 // whose lifetime is contained within a basic block. In this case there
707 // will be a single kill, in MBB, which comes after the definition.
708 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
709 // FIXME: what about dead vars?
711 if (vi.Kills[0] != mi)
712 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
713 else if (MO.isEarlyClobber())
714 // Earlyclobbers that die in this instruction move up one extra, to
715 // compensate for having the starting point moved back one. This
716 // gets them to overlap the live range of other outputs.
717 killIdx = getNextSlot(getNextSlot(defIndex));
719 killIdx = getNextSlot(defIndex);
721 // If the kill happens after the definition, we have an intra-block
723 if (killIdx > defIndex) {
724 assert(vi.AliveBlocks.empty() &&
725 "Shouldn't be alive across any blocks!");
726 LiveRange LR(defIndex, killIdx, ValNo);
727 interval.addRange(LR);
728 DEBUG(errs() << " +" << LR << "\n");
729 ValNo->addKill(killIdx);
734 // The other case we handle is when a virtual register lives to the end
735 // of the defining block, potentially live across some blocks, then is
736 // live into some number of blocks, but gets killed. Start by adding a
737 // range that goes from this definition to the end of the defining block.
738 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
739 DEBUG(errs() << " +" << NewLR);
740 interval.addRange(NewLR);
742 // Iterate over all of the blocks that the variable is completely
743 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
745 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
746 E = vi.AliveBlocks.end(); I != E; ++I) {
747 LiveRange LR(getMBBStartIdx(*I),
748 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
750 interval.addRange(LR);
751 DEBUG(errs() << " +" << LR);
754 // Finally, this virtual register is live from the start of any killing
755 // block to the 'use' slot of the killing instruction.
756 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
757 MachineInstr *Kill = vi.Kills[i];
759 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
760 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
761 interval.addRange(LR);
762 ValNo->addKill(killIdx);
763 DEBUG(errs() << " +" << LR);
767 // If this is the second time we see a virtual register definition, it
768 // must be due to phi elimination or two addr elimination. If this is
769 // the result of two address elimination, then the vreg is one of the
770 // def-and-use register operand.
771 if (mi->isRegTiedToUseOperand(MOIdx)) {
772 // If this is a two-address definition, then we have already processed
773 // the live range. The only problem is that we didn't realize there
774 // are actually two values in the live interval. Because of this we
775 // need to take the LiveRegion that defines this register and split it
777 assert(interval.containsOneValue());
778 LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
779 LiveIndex RedefIndex = getDefIndex(MIIdx);
780 if (MO.isEarlyClobber())
781 RedefIndex = getUseIndex(MIIdx);
783 const LiveRange *OldLR =
784 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
785 VNInfo *OldValNo = OldLR->valno;
787 // Delete the initial value, which should be short and continuous,
788 // because the 2-addr copy must be in the same MBB as the redef.
789 interval.removeRange(DefIndex, RedefIndex);
791 // Two-address vregs should always only be redefined once. This means
792 // that at this point, there should be exactly one value number in it.
793 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
795 // The new value number (#1) is defined by the instruction we claimed
797 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
798 false, // update at *
800 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
802 // Value#0 is now defined by the 2-addr instruction.
803 OldValNo->def = RedefIndex;
804 OldValNo->setCopy(0);
805 if (MO.isEarlyClobber())
806 OldValNo->setHasRedefByEC(true);
808 // Add the new live interval which replaces the range for the input copy.
809 LiveRange LR(DefIndex, RedefIndex, ValNo);
810 DEBUG(errs() << " replace range with " << LR);
811 interval.addRange(LR);
812 ValNo->addKill(RedefIndex);
814 // If this redefinition is dead, we need to add a dummy unit live
815 // range covering the def slot.
818 LiveRange(RedefIndex, MO.isEarlyClobber() ?
819 getNextSlot(getNextSlot(RedefIndex)) :
820 getNextSlot(RedefIndex), OldValNo));
823 errs() << " RESULT: ";
824 interval.print(errs(), tri_);
827 // Otherwise, this must be because of phi elimination. If this is the
828 // first redefinition of the vreg that we have seen, go back and change
829 // the live range in the PHI block to be a different value number.
830 if (interval.containsOneValue()) {
831 // Remove the old range that we now know has an incorrect number.
832 VNInfo *VNI = interval.getValNumInfo(0);
833 MachineInstr *Killer = vi.Kills[0];
834 phiJoinCopies.push_back(Killer);
835 LiveIndex Start = getMBBStartIdx(Killer->getParent());
837 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
839 errs() << " Removing [" << Start << "," << End << "] from: ";
840 interval.print(errs(), tri_);
843 interval.removeRange(Start, End);
844 assert(interval.ranges.size() == 1 &&
845 "Newly discovered PHI interval has >1 ranges.");
846 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
847 VNI->addKill(terminatorGaps[killMBB]);
848 VNI->setHasPHIKill(true);
850 errs() << " RESULT: ";
851 interval.print(errs(), tri_);
854 // Replace the interval with one of a NEW value number. Note that this
855 // value number isn't actually defined by an instruction, weird huh? :)
856 LiveRange LR(Start, End,
857 interval.getNextValue(LiveIndex(mbb->getNumber()),
858 0, false, VNInfoAllocator));
859 LR.valno->setIsPHIDef(true);
860 DEBUG(errs() << " replace range with " << LR);
861 interval.addRange(LR);
862 LR.valno->addKill(End);
864 errs() << " RESULT: ";
865 interval.print(errs(), tri_);
869 // In the case of PHI elimination, each variable definition is only
870 // live until the end of the block. We've already taken care of the
871 // rest of the live range.
872 LiveIndex defIndex = getDefIndex(MIIdx);
873 if (MO.isEarlyClobber())
874 defIndex = getUseIndex(MIIdx);
877 MachineInstr *CopyMI = NULL;
878 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
879 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
880 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
881 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
882 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
884 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
886 LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
887 LiveRange LR(defIndex, killIndex, ValNo);
888 interval.addRange(LR);
889 ValNo->addKill(terminatorGaps[mbb]);
890 ValNo->setHasPHIKill(true);
891 DEBUG(errs() << " +" << LR);
895 DEBUG(errs() << '\n');
// Extend `interval` for a physical-register def at MIIdx. Physregs are
// block-local here, so the range ends at a kill, a redef, or (if neither is
// found) one slot after the def. NOTE(review): this numbered listing omits
// some original lines; code kept verbatim.
898 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
899 MachineBasicBlock::iterator mi,
902 LiveInterval &interval,
903 MachineInstr *CopyMI) {
904 // A physical register cannot be live across basic block, so its
905 // lifetime must end somewhere in its defining basic block.
907 errs() << "\t\tregister: ";
908 printRegName(interval.reg, tri_);
911 LiveIndex baseIndex = MIIdx;
912 LiveIndex start = getDefIndex(baseIndex);
913 // Earlyclobbers move back one.
914 if (MO.isEarlyClobber())
915 start = getUseIndex(MIIdx);
916 LiveIndex end = start;
918 // If it is not used after definition, it is considered dead at
919 // the instruction defining it. Hence its interval is:
920 // [defSlot(def), defSlot(def)+1)
921 // For earlyclobbers, the defSlot was pushed back one; the extra
922 // advance below compensates.
924 DEBUG(errs() << " dead");
925 if (MO.isEarlyClobber())
926 end = getNextSlot(getNextSlot(start));
928 end = getNextSlot(start);
932 // If it is not dead on definition, it must be killed by a
933 // subsequent instruction. Hence its interval is:
934 // [defSlot(def), useSlot(kill)+1)
935 baseIndex = getNextIndex(baseIndex);
936 while (++mi != MBB->end()) {
// Skip index slots with no attached instruction (gaps/deleted).
937 while (baseIndex.getVecIndex() < i2miMap_.size() &&
938 getInstructionFromIndex(baseIndex) == 0)
939 baseIndex = getNextIndex(baseIndex);
940 if (mi->killsRegister(interval.reg, tri_)) {
941 DEBUG(errs() << " killed");
942 end = getNextSlot(getUseIndex(baseIndex));
945 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
947 if (mi->isRegTiedToUseOperand(DefIdx)) {
948 // Two-address instruction.
949 end = getDefIndex(baseIndex);
950 if (mi->getOperand(DefIdx).isEarlyClobber())
951 end = getUseIndex(baseIndex);
953 // Another instruction redefines the register before it is ever read.
954 // Then the register is essentially dead at the instruction that defines
955 // it. Hence its interval is:
956 // [defSlot(def), defSlot(def)+1)
957 DEBUG(errs() << " dead");
958 end = getNextSlot(start);
964 baseIndex = getNextIndex(baseIndex);
967 // The only case we should have a dead physreg here without a killing or
968 // instruction where we know it's dead is if it is live-in to the function
969 // and never used. Another possible case is the implicit use of the
970 // physical register has been deleted by two-address pass.
971 end = getNextSlot(start);
974 assert(start < end && "did not find end of interval?");
976 // Already exists? Extend old live interval.
977 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
978 bool Extend = OldLR != interval.end();
979 VNInfo *ValNo = Extend
980 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
981 if (MO.isEarlyClobber() && Extend)
982 ValNo->setHasRedefByEC(true);
983 LiveRange LR(start, end, ValNo);
984 interval.addRange(LR);
985 LR.valno->addKill(end);
986 DEBUG(errs() << " +" << LR << '\n');
// Dispatch a register def to the virtual- or physical-register handler.
// For allocatable physregs, also processes each sub-register, since a def
// of a register defines its sub-registers too. NOTE(review): this numbered
// listing omits some original lines; code kept verbatim.
989 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
990 MachineBasicBlock::iterator MI,
994 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
995 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
996 getOrCreateInterval(MO.getReg()));
997 else if (allocatableRegs_[MO.getReg()]) {
// Record the instruction as the copy when it is copy-like, so later
// coalescing can use it.
998 MachineInstr *CopyMI = NULL;
999 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1000 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
1001 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1002 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
1003 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1005 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1006 getOrCreateInterval(MO.getReg()), CopyMI);
1007 // Def of a register also defines its sub-registers.
1008 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1009 // If MI also modifies the sub-register explicitly, avoid processing it
1010 // more than once. Do not pass in TRI here so it checks for exact match.
1011 if (!MI->modifiesRegister(*AS))
1012 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1013 getOrCreateInterval(*AS), 0);
// Build the range for a register that is live-in to MBB: scan forward from
// MIIdx for the first kill or redef; if none is found the register is either
// dead on entry or live through the whole block. The resulting value number
// is marked as a PHI def since it has no defining instruction here.
// NOTE(review): this numbered listing omits some original lines; code kept
// verbatim.
1017 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1019 LiveInterval &interval, bool isAlias) {
1021 errs() << "\t\tlivein register: ";
1022 printRegName(interval.reg, tri_);
1025 // Look for kills, if it reaches a def before it's killed, then it shouldn't
1026 // be considered a livein.
1027 MachineBasicBlock::iterator mi = MBB->begin();
1028 LiveIndex baseIndex = MIIdx;
1029 LiveIndex start = baseIndex;
// Skip index slots with no attached instruction.
1030 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1031 getInstructionFromIndex(baseIndex) == 0)
1032 baseIndex = getNextIndex(baseIndex);
1033 LiveIndex end = baseIndex;
1034 bool SeenDefUse = false;
1036 while (mi != MBB->end()) {
1037 if (mi->killsRegister(interval.reg, tri_)) {
1038 DEBUG(errs() << " killed");
1039 end = getNextSlot(getUseIndex(baseIndex));
1042 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1043 // Another instruction redefines the register before it is ever read.
1044 // Then the register is essentially dead at the instruction that defines
1045 // it. Hence its interval is:
1046 // [defSlot(def), defSlot(def)+1)
1047 DEBUG(errs() << " dead");
1048 end = getNextSlot(getDefIndex(start));
1053 baseIndex = getNextIndex(baseIndex);
1055 if (mi != MBB->end()) {
1056 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1057 getInstructionFromIndex(baseIndex) == 0)
1058 baseIndex = getNextIndex(baseIndex);
1062 // Live-in register might not be used at all.
1065 DEBUG(errs() << " dead");
1066 end = getNextSlot(getDefIndex(MIIdx));
1068 DEBUG(errs() << " live through");
// The live-in value has no defining instruction in this function view, so
// it is represented as a PHI-defined value number.
1074 interval.getNextValue(LiveIndex(MBB->getNumber()),
1075 0, false, VNInfoAllocator);
1076 vni->setIsPHIDef(true);
1077 LiveRange LR(start, end, vni);
1079 interval.addRange(LR);
1080 LR.valno->addKill(end);
1081 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Partition all copies defining SrcInt.reg into
// identity copies (whose source is DstInt.reg) and all others, and decide
// whether early-coalescing them is worthwhile: profitable when identity
// copies outnumber the others. NOTE(review): a conflict/early-out check sits
// on elided lines before the final return — confirm against full source.
1085 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1086 SmallVector<MachineInstr*,16> &IdentCopies,
1087 SmallVector<MachineInstr*,16> &OtherCopies) {
1088 bool HaveConflict = false;
1089 unsigned NumIdent = 0;
// Walk every instruction that defines the source register.
1090 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
1091 re = mri_->def_end(); ri != re; ++ri) {
1092 MachineInstr *MI = &*ri;
1093 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1094 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1096 if (SrcReg != DstInt.reg) {
// A copy from some other register: coalescing would conflict if DstInt is
// already live at this point.
1097 OtherCopies.push_back(MI);
1098 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1100 IdentCopies.push_back(MI);
1106 return false; // Let coalescer handle it
1107 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - For each recorded phi-join copy, try to fold the
// copies feeding the phi join into the phi destination register, then delete
// the join copy itself and transfer the source interval's ranges to the
// destination interval. Gated by the EarlyCoalescing flag and the
// CoalescingLimit debugging knob.
1110 void LiveIntervals::performEarlyCoalescing() {
1111 if (!EarlyCoalescing)
1114 /// Perform early coalescing: eliminate copies which feed into phi joins
1115 /// and whose sources are defined by the phi joins.
1116 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1117 MachineInstr *Join = phiJoinCopies[i];
1118 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1121 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1122 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1124 assert(isMove && "PHI join instruction must be a move!");
1129 LiveInterval &DstInt = getInterval(PHIDst);
1130 LiveInterval &SrcInt = getInterval(PHISrc);
1131 SmallVector<MachineInstr*, 16> IdentCopies;
1132 SmallVector<MachineInstr*, 16> OtherCopies;
1133 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1136 DEBUG(errs() << "PHI Join: " << *Join);
1137 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1138 VNInfo *VNI = DstInt.getValNumInfo(0);
1140 // Change the non-identity copies to directly target the phi destination.
1141 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1142 MachineInstr *PHICopy = OtherCopies[i];
1143 DEBUG(errs() << "Moving: " << *PHICopy);
1145 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1146 LiveIndex DefIndex = getDefIndex(MIIndex);
1147 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1148 LiveIndex StartIndex = SLR->start;
1149 LiveIndex EndIndex = SLR->end;
1151 // Delete val# defined by the now identity copy and add the range from
1152 // beginning of the mbb to the end of the range.
1153 SrcInt.removeValNo(SLR->valno);
1154 DEBUG(errs() << " added range [" << StartIndex << ','
1155 << EndIndex << "] to reg" << DstInt.reg << '\n');
1156 if (DstInt.liveAt(StartIndex))
1157 DstInt.removeRange(StartIndex, EndIndex);
// The moved copy now defines a brand-new value in the destination interval.
1158 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1160 NewVNI->setHasPHIKill(true);
1161 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's operands to reference the phi destination register.
1162 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1163 MachineOperand &MO = PHICopy->getOperand(j);
1164 if (!MO.isReg() || MO.getReg() != PHISrc)
1170 // Now let's eliminate all the would-be identity copies.
1171 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1172 MachineInstr *PHICopy = IdentCopies[i];
1173 DEBUG(errs() << "Coalescing: " << *PHICopy);
1175 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1176 LiveIndex DefIndex = getDefIndex(MIIndex);
1177 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1178 LiveIndex StartIndex = SLR->start;
1179 LiveIndex EndIndex = SLR->end;
1181 // Delete val# defined by the now identity copy and add the range from
1182 // beginning of the mbb to the end of the range.
1183 SrcInt.removeValNo(SLR->valno);
1184 RemoveMachineInstrFromMaps(PHICopy);
1185 PHICopy->eraseFromParent();
1186 DEBUG(errs() << " added range [" << StartIndex << ','
1187 << EndIndex << "] to reg" << DstInt.reg << '\n');
1188 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1191 // Remove the phi join and update the phi block liveness.
1192 LiveIndex MIIndex = getInstructionIndex(Join);
1193 LiveIndex UseIndex = getUseIndex(MIIndex);
1194 LiveIndex DefIndex = getDefIndex(MIIndex);
1195 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1196 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The destination value is no longer defined by a copy and its def index is
// no longer accurate since the join instruction is about to be deleted.
1197 DLR->valno->setCopy(0);
1198 DLR->valno->setIsDefAccurate(false);
1199 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1200 SrcInt.removeRange(SLR->start, SLR->end);
1201 assert(SrcInt.empty());
1202 removeInterval(PHISrc);
1203 RemoveMachineInstrFromMaps(Join);
1204 Join->eraseFromParent();
1210 /// computeIntervals - computes the live intervals for virtual
1211 /// registers. for some ordering of the machine instructions [1,N] a
1212 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1213 /// which a variable is live
1214 void LiveIntervals::computeIntervals() {
1215 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1216 << "********** Function: "
1217 << ((Value*)mf_->getFunction())->getName() << '\n');
1219 SmallVector<unsigned, 8> UndefUses;
// Process blocks in layout order; each block starts at its own index.
1220 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1221 MBBI != E; ++MBBI) {
1222 MachineBasicBlock *MBB = MBBI;
1223 // Track the index of the current machine instr.
1224 LiveIndex MIIndex = getMBBStartIdx(MBB);
1225 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1227 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1229 // Create intervals for live-ins to this BB first.
1230 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1231 LE = MBB->livein_end(); LI != LE; ++LI) {
1232 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1233 // Multiple live-ins can alias the same register.
1234 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1235 if (!hasInterval(*AS))
1236 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1240 // Skip over empty initial indices.
1241 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1242 getInstructionFromIndex(MIIndex) == 0)
1243 MIIndex = getNextIndex(MIIndex)
1245 for (; MI != miEnd; ++MI) {
1246 DEBUG(errs() << MIIndex << "\t" << *MI);
// Walk operands in reverse; defs build intervals, undef uses are deferred.
1249 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1250 MachineOperand &MO = MI->getOperand(i);
1251 if (!MO.isReg() || !MO.getReg())
1254 // handle register defs - build intervals
1256 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1257 else if (MO.isUndef())
1258 UndefUses.push_back(MO.getReg());
1261 // Skip over the empty slots after each instruction.
1262 unsigned Slots = MI->getDesc().getNumDefs();
1267 MIIndex = getNextIndex(MIIndex);
1269 // Skip over empty indices.
1270 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1271 getInstructionFromIndex(MIIndex) == 0)
1272 MIIndex = getNextIndex(MIIndex);
1276 // Create empty intervals for registers defined by implicit_def's (except
1277 // for those implicit_def that define values which are liveout of their
1279 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1280 unsigned UndefReg = UndefUses[i];
1281 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Append to MBBs every basic block whose start index falls
// in [Start, End), using a binary search over the sorted Idx2MBBMap.
// Returns true if any block was found (return on an elided line).
1285 bool LiveIntervals::findLiveInMBBs(
1286 LiveIndex Start, LiveIndex End,
1287 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1288 std::vector<IdxMBBPair>::const_iterator I =
1289 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1291 bool ResVal = false;
1292 while (I != Idx2MBBMap.end()) {
// Stop once the block start index passes the end of the query range.
1293 if (I->first >= End)
1295 MBBs.push_back(I->second);
// findReachableMBBs - Append to MBBs the successors of every basic block
// that starts within [Start, End), i.e. the blocks reachable from the range.
// Uses the same sorted Idx2MBBMap binary search as findLiveInMBBs.
1302 bool LiveIntervals::findReachableMBBs(
1303 LiveIndex Start, LiveIndex End,
1304 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1305 std::vector<IdxMBBPair>::const_iterator I =
1306 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1308 bool ResVal = false;
1309 while (I != Idx2MBBMap.end()) {
1312 MachineBasicBlock *MBB = I->second;
// Stop once this block extends past the end of the query range.
1313 if (getMBBEndIdx(MBB) > End)
1315 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1316 SE = MBB->succ_end(); SI != SE; ++SI)
1317 MBBs.push_back(*SI);
// createInterval - Allocate a fresh LiveInterval for reg. Physical registers
// get an infinite spill weight so the allocator never tries to spill them.
1324 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1325 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1326 return new LiveInterval(reg, Weight);
1329 /// dupInterval - Duplicate a live interval. The caller is responsible for
1330 /// managing the allocated memory.
1331 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
// Deep-copy the ranges and value numbers into a fresh interval for the
// same register, using this analysis's VNInfo allocator.
1332 LiveInterval *NewLI = createInterval(li->reg);
1333 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1337 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1338 /// copy field and returns the source register that defines it.
1339 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// A value not defined by a copy has no source register (returns on an
// elided line).
1340 if (!VNI->getCopy())
1343 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1344 // If it's extracting out of a physical register, return the sub-register.
1345 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1346 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1347 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1349 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1350 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// For insert/subreg-to-reg, operand 2 is the register being inserted.
1351 return VNI->getCopy()->getOperand(2).getReg();
1353 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1354 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1356 llvm_unreachable("Unrecognized copy instruction!");
1360 //===----------------------------------------------------------------------===//
1361 // Register allocator hooks.
1364 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1365 /// allow one) virtual register operand, then its uses are implicitly using
1366 /// the register. Returns the virtual register.
1367 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1368 MachineInstr *MI) const {
1370 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1371 MachineOperand &MO = MI->getOperand(i);
1372 if (!MO.isReg() || !MO.isUse())
1374 unsigned Reg = MO.getReg();
// Ignore the interval's own register and the null register.
1375 if (Reg == 0 || Reg == li.reg)
// Unallocatable physical registers (e.g. reserved regs) don't count as a
// rematerialization dependency.
1378 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1379 !allocatableRegs_[Reg])
1381 // FIXME: For now, only remat MI with at most one register operand.
1383 "Can't rematerialize instruction with multiple register operand!");
1384 RegOp = MO.getReg();
1392 /// isValNoAvailableAt - Return true if the val# of the specified interval
1393 /// which reaches the given instruction also reaches the specified use index.
1394 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1395 LiveIndex UseIdx) const {
// Find the value live at MI, then check the same value covers UseIdx.
1396 LiveIndex Index = getInstructionIndex(MI);
1397 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1398 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1399 return UI != li.end() && UI->valno == ValNo;
1402 /// isReMaterializable - Returns true if the definition MI of the specified
1403 /// val# of the specified interval is re-materializable.
1404 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1405 const VNInfo *ValNo, MachineInstr *MI,
1406 SmallVectorImpl<LiveInterval*> &SpillIs,
// IMPLICIT_DEF is trivially rematerializable (return on an elided line).
1411 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1415 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1416 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1417 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
1418 // this but remember this is not safe to fold into a two-address
1420 // This is a load from fixed stack slot. It can be rematerialized.
1423 // If the target-specific rules don't identify an instruction as
1424 // being trivially rematerializable, use some target-independent
1426 if (!tii_->isTriviallyReMaterializable(MI)) {
1427 if (!EnableAggressiveRemat)
1430 const TargetInstrDesc &TID = MI->getDesc();
1432 // Avoid instructions obviously unsafe for remat.
1433 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
1437 // Avoid instructions which load from potentially varying memory.
1438 if (TID.mayLoad() && !MI->isInvariantLoad(aa_))
1441 // If any of the registers accessed are non-constant, conservatively assume
1442 // the instruction is not rematerializable.
1443 unsigned ImpUse = 0;
1444 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1445 const MachineOperand &MO = MI->getOperand(i);
1447 unsigned Reg = MO.getReg();
1450 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1452 // If the physreg has no defs anywhere, it's just an ambient register
1453 // and we can freely move its uses. Alternatively, if it's allocatable,
1454 // it could get allocated to something with a def during allocation.
1455 if (!mri_->def_empty(Reg))
1457 if (allocatableRegs_.test(Reg))
1459 // Check for a def among the register's aliases too.
1460 for (const unsigned *Alias = tri_->getAliasSet(Reg); *Alias; ++Alias) {
1461 unsigned AliasReg = *Alias;
1462 if (!mri_->def_empty(AliasReg))
1464 if (allocatableRegs_.test(AliasReg))
1468 // A physreg def. We can't remat it.
1474 // Only allow one def, and that in the first operand.
1475 if (MO.isDef() != (i == 0))
1478 // Only allow constant-valued registers.
1479 bool IsLiveIn = mri_->isLiveIn(Reg);
1480 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1481 E = mri_->def_end();
1483 // For the def, it should be the only def of that register.
1484 if (MO.isDef() && (next(I) != E || IsLiveIn))
1488 // Only allow one use other register use, as that's all the
1489 // remat mechanisms support currently.
1490 if (Reg != li.reg) {
1493 else if (Reg != ImpUse)
1496 // For the use, there should be only one associated def.
1497 if (I != E && (next(I) != E || IsLiveIn))
// If the def uses another virtual register, every use of the remat'ed value
// must still see that register's same value (checked below).
1504 unsigned ImpUse = getReMatImplicitUse(li, MI);
1506 const LiveInterval &ImpLi = getInterval(ImpUse);
1507 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1508 re = mri_->use_end(); ri != re; ++ri) {
1509 MachineInstr *UseMI = &*ri;
1510 LiveIndex UseIdx = getInstructionIndex(UseMI);
1511 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1513 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1517 // If a register operand of the re-materialized instruction is going to
1518 // be spilled next, then it's not legal to re-materialize this instruction.
1519 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1520 if (ImpUse == SpillIs[i]->reg)
1526 /// isReMaterializable - Returns true if the definition MI of the specified
1527 /// val# of the specified interval is re-materializable.
1528 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1529 const VNInfo *ValNo, MachineInstr *MI) {
// Convenience overload: no spill candidates, discard the isLoad result.
1530 SmallVector<LiveInterval*, 4> Dummy1;
1532 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1535 /// isReMaterializable - Returns true if every definition of MI of every
1536 /// val# of the specified interval is re-materializable.
1537 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1538 SmallVectorImpl<LiveInterval*> &SpillIs,
// Check every value number of the interval; all must be rematerializable.
1541 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1543 const VNInfo *VNI = *i;
1544 if (VNI->isUnused())
1545 continue; // Dead val#.
1546 // Is the def for the val# rematerializable?
1547 if (!VNI->isDefAccurate())
1549 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1550 bool DefIsLoad = false;
1552 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any rematerializable def is a load.
1554 isLoad |= DefIsLoad;
1559 /// FilterFoldedOps - Filter out two-address use operands. Return
1560 /// true if it finds any issue with the operands that ought to prevent
1562 static bool FilterFoldedOps(MachineInstr *MI,
1563 SmallVector<unsigned, 2> &Ops,
1565 SmallVector<unsigned, 2> &FoldOps) {
1567 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1568 unsigned OpIdx = Ops[i];
1569 MachineOperand &MO = MI->getOperand(OpIdx);
1570 // FIXME: fold subreg use.
// Record whether the folded access modifies the register.
1574 MRInfo |= (unsigned)VirtRegMap::isMod;
1576 // Filter out two-address use operand(s).
1577 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A use tied to a def is both a read and a write; don't fold it.
1578 MRInfo = VirtRegMap::isModRef;
1581 MRInfo |= (unsigned)VirtRegMap::isRef;
1583 FoldOps.push_back(OpIdx);
1589 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1590 /// slot / to reg or any rematerialized load into ith operand of specified
1591 /// MI. If it is successful, MI is updated with the newly created MI and
1593 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1594 VirtRegMap &vrm, MachineInstr *DefMI,
1596 SmallVector<unsigned, 2> &Ops,
1597 bool isSS, int Slot, unsigned Reg) {
1598 // If it is an implicit def instruction, just delete it.
1599 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1600 RemoveMachineInstrFromMaps(MI);
1601 vrm.RemoveMachineInstrFromMaps(MI);
1602 MI->eraseFromParent();
1607 // Filter the list of operand indexes that are to be folded. Abort if
1608 // any operand will prevent folding.
1609 unsigned MRInfo = 0;
1610 SmallVector<unsigned, 2> FoldOps;
1611 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1614 // The only time it's safe to fold into a two address instruction is when
1615 // it's folding reload and spill from / into a spill stack slot.
1616 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/reload) or the remat def instruction.
1619 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1620 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1622 // Remember this instruction uses the spill slot.
1623 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1625 // Attempt to fold the memory reference into the instruction. If
1626 // we can do this, we don't need to insert spill code.
1627 MachineBasicBlock &MBB = *MI->getParent();
1628 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1629 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the folded one.
1630 vrm.transferSpillPts(MI, fmi);
1631 vrm.transferRestorePts(MI, fmi);
1632 vrm.transferEmergencySpills(MI, fmi);
// Update the index maps and splice the folded instruction in place of MI.
1634 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1635 mi2iMap_[fmi] = InstrIdx;
1636 MI = MBB.insert(MBB.erase(MI), fmi);
1643 /// canFoldMemoryOperand - Returns true if the specified load / store
1644 /// folding is possible.
1645 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1646 SmallVector<unsigned, 2> &Ops,
1648 // Filter the list of operand indexes that are to be folded. Abort if
1649 // any operand will prevent folding.
1650 unsigned MRInfo = 0;
1651 SmallVector<unsigned, 2> FoldOps;
1652 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1655 // It's only legal to remat for a use, not a def.
1656 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final answer to the target's folding hook.
1659 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all ranges of li lie within a single
// basic block; false as soon as a range crosses a block boundary or the
// ranges touch more than one block.
1662 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1663 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1664 for (LiveInterval::Ranges::const_iterator
1665 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Locate the block containing the range start via the sorted index map.
1666 std::vector<IdxMBBPair>::const_iterator II =
1667 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1668 if (II == Idx2MBBMap.end())
1670 if (I->end > II->first) // crossing a MBB.
1672 MBBs.insert(II->second);
1673 if (MBBs.size() > 1)
1679 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1680 /// interval on to-be re-materialized operands of MI) with new register.
1681 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1682 MachineInstr *MI, unsigned NewVReg,
1684 // There is an implicit use. That means one of the other operand is
1685 // being remat'ed and the remat'ed instruction has li.reg as an
1686 // use operand. Make sure we rewrite that as well.
1687 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1688 MachineOperand &MO = MI->getOperand(i);
1691 unsigned Reg = MO.getReg();
// Only virtual registers can have a rematerialized def recorded in vrm.
1692 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1694 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed def's use of li.reg to the new register.
1696 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1697 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1699 UseMO->setReg(NewVReg);
1703 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1704 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1705 bool LiveIntervals::
1706 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1707 bool TrySplit, LiveIndex index, LiveIndex end,
1709 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1710 unsigned Slot, int LdSlot,
1711 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1713 const TargetRegisterClass* rc,
1714 SmallVector<int, 4> &ReMatIds,
1715 const MachineLoopInfo *loopInfo,
1716 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1717 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1718 std::vector<LiveInterval*> &NewLIs) {
// Returns whether every rewritten operand could also be folded (CanFold).
1719 bool CanFold = false;
1721 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1722 MachineOperand& mop = MI->getOperand(i);
1725 unsigned Reg = mop.getReg();
1726 unsigned RegI = Reg;
1727 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1732 bool TryFold = !DefIsReMat;
1733 bool FoldSS = true; // Default behavior unless it's a remat.
1734 int FoldSlot = Slot;
1736 // If this is the rematerializable definition MI itself and
1737 // all of its uses are rematerialized, simply delete it.
1738 if (MI == ReMatOrigDefMI && CanDelete) {
1739 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1741 RemoveMachineInstrFromMaps(MI);
1742 vrm.RemoveMachineInstrFromMaps(MI);
1743 MI->eraseFromParent();
1747 // If def for this use can't be rematerialized, then try folding.
1748 // If def is rematerializable and it's a load, also try folding.
1749 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1751 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1757 // Scan all of the operands of this instruction rewriting operands
1758 // to use NewVReg instead of li.reg as appropriate. We do this for
1761 // 1. If the instr reads the same spilled vreg multiple times, we
1762 // want to reuse the NewVReg.
1763 // 2. If the instr is a two-addr instruction, we are required to
1764 // keep the src/dst regs pinned.
1766 // Keep track of whether we replace a use and/or def so that we can
1767 // create the spill interval with the appropriate range.
1769 HasUse = mop.isUse();
1770 HasDef = mop.isDef();
1771 SmallVector<unsigned, 2> Ops;
// Collect all later operands referring to the same register so they can be
// rewritten together and considered for folding as a group.
1773 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1774 const MachineOperand &MOj = MI->getOperand(j);
1777 unsigned RegJ = MOj.getReg();
1778 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1782 if (!MOj.isUndef()) {
1783 HasUse |= MOj.isUse();
1784 HasDef |= MOj.isDef();
1789 // Create a new virtual register for the spill interval.
1790 // Create the new register now so we can map the fold instruction
1791 // to the new register so when it is unfolded we get the correct
1793 bool CreatedNewVReg = false;
1795 NewVReg = mri_->createVirtualRegister(rc);
1797 CreatedNewVReg = true;
1803 // Do not fold load / store here if we are splitting. We'll find an
1804 // optimal point to insert a load / store later.
1806 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1807 Ops, FoldSS, FoldSlot, NewVReg)) {
1808 // Folding the load/store can completely change the instruction in
1809 // unpredictable ways, rescan it from the beginning.
1812 // We need to give the new vreg the same stack slot as the
1813 // spilled interval.
1814 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed MI from the index maps entirely, restart the scan of
// the (possibly new) instruction.
1820 if (isNotInMIMap(MI))
1822 goto RestartInstruction;
1825 // We'll try to fold it later if it's profitable.
1826 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and its grouped duplicates below) to NewVReg.
1830 mop.setReg(NewVReg);
1831 if (mop.isImplicit())
1832 rewriteImplicitOps(li, MI, NewVReg, vrm);
1834 // Reuse NewVReg for other reads.
1835 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1836 MachineOperand &mopj = MI->getOperand(Ops[j]);
1837 mopj.setReg(NewVReg);
1838 if (mopj.isImplicit())
1839 rewriteImplicitOps(li, MI, NewVReg, vrm);
1842 if (CreatedNewVReg) {
1844 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1845 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1846 // Each valnum may have its own remat id.
1847 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1849 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1851 if (!CanDelete || (HasUse && HasDef)) {
1852 // If this is a two-addr instruction then its use operands are
1853 // rematerializable but its def is not. It should be assigned a
1855 vrm.assignVirt2StackSlot(NewVReg, Slot);
1858 vrm.assignVirt2StackSlot(NewVReg, Slot);
1860 } else if (HasUse && HasDef &&
1861 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1862 // If this interval hasn't been assigned a stack slot (because earlier
1863 // def is a deleted remat def), do it now.
1864 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1865 vrm.assignVirt2StackSlot(NewVReg, Slot);
1868 // Re-matting an instruction with virtual register use. Add the
1869 // register as an implicit use on the use MI.
1870 if (DefIsReMat && ImpUse)
1871 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1873 // Create a new register interval for this spill / remat.
1874 LiveInterval &nI = getOrCreateInterval(NewVReg);
1875 if (CreatedNewVReg) {
1876 NewLIs.push_back(&nI);
1877 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1879 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend or create the live range of the new interval to cover this
// use and/or def of the spilled register.
1883 if (CreatedNewVReg) {
1884 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1885 nI.getNextValue(LiveIndex(), 0, false,
1887 DEBUG(errs() << " +" << LR);
1890 // Extend the split live interval to this def / use.
1891 LiveIndex End = getNextSlot(getUseIndex(index));
1892 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1893 nI.getValNumInfo(nI.getNumValNums()-1));
1894 DEBUG(errs() << " +" << LR);
1899 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1900 nI.getNextValue(LiveIndex(), 0, false,
1902 DEBUG(errs() << " +" << LR);
1907 errs() << "\t\t\t\tAdded new interval: ";
1908 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if value number VNI of li has a kill
// within MBB that occurs strictly after Idx. PHI kills are skipped since
// they mark block-boundary merges, not real uses.
1914 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1916 MachineBasicBlock *MBB,
1917 LiveIndex Idx) const {
1918 LiveIndex End = getMBBEndIdx(MBB);
1919 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1920 if (VNI->kills[j].isPHIIndex())
// A kill counts only if it falls inside (Idx, End) within this block.
1923 LiveIndex KillIdx = VNI->kills[j];
1924 if (KillIdx > Idx && KillIdx < End)
1930 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1931 /// during spilling.
1933 struct RewriteInfo {
// Members (Index, MI, HasUse, HasDef) are declared on elided lines; the
// constructor below initializes them from the instruction's index and
// whether it reads and/or writes the spilled register.
1938 RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
1939 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1942 struct RewriteInfoCompare {
// Strict weak ordering by instruction index, used to sort rewrite entries
// into program order before processing.
1943 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1944 return LHS.Index < RHS.Index;
1949 void LiveIntervals::
1950 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1951 LiveInterval::Ranges::const_iterator &I,
1952 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1953 unsigned Slot, int LdSlot,
1954 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1956 const TargetRegisterClass* rc,
1957 SmallVector<int, 4> &ReMatIds,
1958 const MachineLoopInfo *loopInfo,
1959 BitVector &SpillMBBs,
1960 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1961 BitVector &RestoreMBBs,
1962 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1963 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1964 std::vector<LiveInterval*> &NewLIs) {
1965 bool AllCanFold = true;
1966 unsigned NewVReg = 0;
1967 LiveIndex start = getBaseIndex(I->start);
1968 LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1970 // First collect all the def / use in this live range that will be rewritten.
1971 // Make sure they are sorted according to instruction index.
1972 std::vector<RewriteInfo> RewriteMIs;
1973 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1974 re = mri_->reg_end(); ri != re; ) {
1975 MachineInstr *MI = &*ri;
1976 MachineOperand &O = ri.getOperand();
1978 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1979 LiveIndex index = getInstructionIndex(MI);
1980 if (index < start || index >= end)
1984 // Must be defined by an implicit def. It should not be spilled. Note,
1985 // this is for correctness reason. e.g.
1986 // 8 %reg1024<def> = IMPLICIT_DEF
1987 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1988 // The live range [12, 14) are not part of the r1024 live interval since
1989 // it's defined by an implicit def. It will not conflicts with live
1990 // interval of r1025. Now suppose both registers are spilled, you can
1991 // easily see a situation where both registers are reloaded before
1992 // the INSERT_SUBREG and both target registers that would overlap.
1994 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1996 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1998 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1999 // Now rewrite the defs and uses.
2000 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
2001 RewriteInfo &rwi = RewriteMIs[i];
2003 LiveIndex index = rwi.Index;
2004 bool MIHasUse = rwi.HasUse;
2005 bool MIHasDef = rwi.HasDef;
2006 MachineInstr *MI = rwi.MI;
2007 // If MI def and/or use the same register multiple times, then there
2008 // are multiple entries.
2009 unsigned NumUses = MIHasUse;
2010 while (i != e && RewriteMIs[i].MI == MI) {
2011 assert(RewriteMIs[i].Index == index);
2012 bool isUse = RewriteMIs[i].HasUse;
2013 if (isUse) ++NumUses;
2015 MIHasDef |= RewriteMIs[i].HasDef;
2018 MachineBasicBlock *MBB = MI->getParent();
2020 if (ImpUse && MI != ReMatDefMI) {
2021 // Re-matting an instruction with virtual register use. Update the
2022 // register interval's spill weight to HUGE_VALF to prevent it from
2024 LiveInterval &ImpLi = getInterval(ImpUse);
2025 ImpLi.weight = HUGE_VALF;
2028 unsigned MBBId = MBB->getNumber();
2029 unsigned ThisVReg = 0;
2031 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2032 if (NVI != MBBVRegsMap.end()) {
2033 ThisVReg = NVI->second;
2040 // It's better to start a new interval to avoid artifically
2041 // extend the new interval.
2042 if (MIHasDef && !MIHasUse) {
2043 MBBVRegsMap.erase(MBB->getNumber());
2049 bool IsNew = ThisVReg == 0;
2051 // This ends the previous live interval. If all of its def / use
2052 // can be folded, give it a low spill weight.
2053 if (NewVReg && TrySplit && AllCanFold) {
2054 LiveInterval &nI = getOrCreateInterval(NewVReg);
2061 bool HasDef = false;
2062 bool HasUse = false;
2063 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2064 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2065 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2066 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2067 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2068 if (!HasDef && !HasUse)
2071 AllCanFold &= CanFold;
2073 // Update weight of spill interval.
2074 LiveInterval &nI = getOrCreateInterval(NewVReg);
2076 // The spill weight is now infinity as it cannot be spilled again.
2077 nI.weight = HUGE_VALF;
2081 // Keep track of the last def and first use in each MBB.
2083 if (MI != ReMatOrigDefMI || !CanDelete) {
2084 bool HasKill = false;
2086 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2088 // If this is a two-address code, then this index starts a new VNInfo.
2089 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2091 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2093 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2094 SpillIdxes.find(MBBId);
2096 if (SII == SpillIdxes.end()) {
2097 std::vector<SRInfo> S;
2098 S.push_back(SRInfo(index, NewVReg, true));
2099 SpillIdxes.insert(std::make_pair(MBBId, S));
2100 } else if (SII->second.back().vreg != NewVReg) {
2101 SII->second.push_back(SRInfo(index, NewVReg, true));
2102 } else if (index > SII->second.back().index) {
2103 // If there is an earlier def and this is a two-address
2104 // instruction, then it's not possible to fold the store (which
2105 // would also fold the load).
2106 SRInfo &Info = SII->second.back();
2108 Info.canFold = !HasUse;
2110 SpillMBBs.set(MBBId);
2111 } else if (SII != SpillIdxes.end() &&
2112 SII->second.back().vreg == NewVReg &&
2113 index > SII->second.back().index) {
2114 // There is an earlier def that's not killed (must be two-address).
2115 // The spill is no longer needed.
2116 SII->second.pop_back();
2117 if (SII->second.empty()) {
2118 SpillIdxes.erase(MBBId);
2119 SpillMBBs.reset(MBBId);
2126 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2127 SpillIdxes.find(MBBId);
2128 if (SII != SpillIdxes.end() &&
2129 SII->second.back().vreg == NewVReg &&
2130 index > SII->second.back().index)
2131 // Use(s) following the last def, it's not safe to fold the spill.
2132 SII->second.back().canFold = false;
2133 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2134 RestoreIdxes.find(MBBId);
2135 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2136 // If we are splitting live intervals, only fold if it's the first
2137 // use and there isn't another use later in the MBB.
2138 RII->second.back().canFold = false;
2140 // Only need a reload if there isn't an earlier def / use.
2141 if (RII == RestoreIdxes.end()) {
2142 std::vector<SRInfo> Infos;
2143 Infos.push_back(SRInfo(index, NewVReg, true));
2144 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2146 RII->second.push_back(SRInfo(index, NewVReg, true));
2148 RestoreMBBs.set(MBBId);
2152 // Update spill weight.
2153 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2154 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2157 if (NewVReg && TrySplit && AllCanFold) {
2158 // If all of its def / use can be folded, give it a low spill weight.
2159 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if basic block 'Id' has a restore of
/// virtual register 'vr' recorded at 'index' that is still marked foldable.
/// NOTE(review): this excerpt is missing intervening source lines (e.g. the
/// early return and the loop's result returns); comments cover only the
/// visible logic.
2164 bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
2165 unsigned vr, BitVector &RestoreMBBs,
2166 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restore was ever recorded for this MBB; nothing to fold.
2167 if (!RestoreMBBs[Id])
// Scan the MBB's recorded restores for one matching this index and vreg
// whose canFold flag is still set.
2169 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2170 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2171 if (Restores[i].index == index &&
2172 Restores[i].vreg == vr &&
2173 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore recorded at 'index' in basic
/// block 'Id' by resetting its index to the default (invalid) LiveIndex,
/// so later passes over RestoreIdxes skip it.
/// NOTE(review): this excerpt is missing intervening source lines (e.g. the
/// early return for !RestoreMBBs[Id]).
2178 void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
2179 unsigned vr, BitVector &RestoreMBBs,
2180 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// Nothing recorded for this MBB.
2181 if (!RestoreMBBs[Id])
2183 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2184 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): 'Restores[i].vreg' is tested for truthiness here, which
// matches ANY nonzero vreg at this index; upstream LLVM compares
// 'Restores[i].vreg == vr'. Looks like a truncated/garbled condition —
// verify against the canonical source before relying on this.
2185 if (Restores[i].index == index && Restores[i].vreg)
2186 Restores[i].index = LiveIndex();
2189 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2190 /// spilled and create empty intervals for their uses.
/// Walks every remaining operand of li.reg: defs must be IMPLICIT_DEFs
/// (anything else should already have been rewritten) and are deleted;
/// uses are retargeted to a fresh, implicitly-defined vreg with an empty
/// interval. NOTE(review): excerpt is missing intervening lines (loop
/// iterator advance, def/use branch, operand rewrite body).
2192 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2193 const TargetRegisterClass* rc,
2194 std::vector<LiveInterval*> &NewLIs) {
// Note: no increment in the for header — the iterator must be advanced
// inside the loop since instructions are erased while iterating.
2195 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2196 re = mri_->reg_end(); ri != re; ) {
2197 MachineOperand &O = ri.getOperand();
2198 MachineInstr *MI = &*ri;
// A remaining def of li.reg can only be an IMPLICIT_DEF; delete it and
// drop it from both the slot-index maps and the VirtRegMap.
2201 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2202 "Register def was not rewritten?");
2203 RemoveMachineInstrFromMaps(MI);
2204 vrm.RemoveMachineInstrFromMaps(MI);
2205 MI->eraseFromParent();
2207 // This must be a use of an implicit_def so it's not part of the live
2208 // interval. Create a new empty live interval for it.
2209 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2210 unsigned NewVReg = mri_->createVirtualRegister(rc);
2212 vrm.setIsImplicitlyDefined(NewVReg);
2213 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that referenced li.reg to
// use the new implicitly-defined vreg instead.
2214 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2215 MachineOperand &MO = MI->getOperand(i);
2216 if (MO.isReg() && MO.getReg() == li.reg) {
/// addIntervalsForSpillsFast - Simple "fast" spiller: assign li.reg one
/// stack slot, then for each instruction touching it either fold the
/// memory operand directly or introduce a tiny per-instruction interval
/// (reload before a use, store after a def). Returns the new intervals.
/// NOTE(review): excerpt is missing intervening lines (DEBUG braces,
/// HasUse/HasDef branch structure, loop closers); comments cover only the
/// visible logic.
2225 std::vector<LiveInterval*> LiveIntervals::
2226 addIntervalsForSpillsFast(const LiveInterval &li,
2227 const MachineLoopInfo *loopInfo,
// One stack slot for the entire original interval.
2229 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2231 std::vector<LiveInterval*> added;
2233 assert(li.weight != HUGE_VALF &&
2234 "attempt to spill already spilled interval!");
2237 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2242 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Iterate over all uses/defs of li.reg. The iterator is re-fetched at the
// bottom (line 2313) because operands are rewritten during the walk.
2244 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2245 while (RI != mri_->reg_end()) {
2246 MachineInstr* MI = &*RI;
// Collect every operand index in MI that refers to li.reg, and whether
// the instruction reads and/or writes it.
2248 SmallVector<unsigned, 2> Indices;
2249 bool HasUse = false;
2250 bool HasDef = false;
2252 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2253 MachineOperand& mop = MI->getOperand(i);
2254 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2256 HasUse |= MI->getOperand(i).isUse();
2257 HasDef |= MI->getOperand(i).isDef();
2259 Indices.push_back(i);
// Try to fold the stack slot straight into MI; only if that fails do we
// materialize a new vreg plus reload/spill points.
2262 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2263 Indices, true, slot, li.reg)) {
2264 unsigned NewVReg = mri_->createVirtualRegister(rc);
// The new vreg shares the original's stack slot.
2266 vrm.assignVirt2StackSlot(NewVReg, slot);
2268 // create a new register for this spill
2269 LiveInterval &nI = getOrCreateInterval(NewVReg);
2271 // the spill weight is now infinity as it
2272 // cannot be spilled again
2273 nI.weight = HUGE_VALF;
2275 // Rewrite register operands to use the new vreg.
2276 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2277 E = Indices.end(); I != E; ++I) {
2278 MI->getOperand(*I).setReg(NewVReg);
// The new interval covers exactly this instruction, so a use is a kill.
2280 if (MI->getOperand(*I).isUse())
2281 MI->getOperand(*I).setIsKill(true);
2284 // Fill in the new live interval.
2285 LiveIndex index = getInstructionIndex(MI);
// Use side: live from the load slot to the use slot; spiller must
// insert a reload before MI.
2287 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2288 nI.getNextValue(LiveIndex(), 0, false,
2289 getVNInfoAllocator()));
2290 DEBUG(errs() << " +" << LR);
2292 vrm.addRestorePoint(NewVReg, MI);
// Def side: live from the def slot to the store slot; spiller must
// insert a store after MI.
2295 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2296 nI.getNextValue(LiveIndex(), 0, false,
2297 getVNInfoAllocator()));
2298 DEBUG(errs() << " +" << LR);
2300 vrm.addSpillPoint(NewVReg, true, MI);
2303 added.push_back(&nI);
2306 errs() << "\t\t\t\tadded new interval: ";
// Restart iteration: rewriting operands above invalidated RI.
2313 RI = mri_->reg_begin(li.reg);
/// addIntervalsForSpills - Main spiller entry point. Decides, per val#,
/// whether the def is rematerializable; assigns a stack slot if any value
/// must live in memory; rewrites all defs/uses into new small intervals;
/// then inserts spill/restore points (folding into the instruction where
/// possible) and finalizes kill flags and spill weights on the results.
/// NOTE(review): this excerpt is missing many intervening source lines
/// (DEBUG braces, else branches, loop/if closers); comments below describe
/// only the visible logic.
2319 std::vector<LiveInterval*> LiveIntervals::
2320 addIntervalsForSpills(const LiveInterval &li,
2321 SmallVectorImpl<LiveInterval*> &SpillIs,
2322 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Optional simpler path (see addIntervalsForSpillsFast).
2324 if (EnableFastSpilling)
2325 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2327 assert(li.weight != HUGE_VALF &&
2328 "attempt to spill already spilled interval!");
2331 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2332 li.print(errs(), tri_);
// Bookkeeping for where spills/restores are needed, keyed by MBB number.
2336 // Each bit specifies whether a spill is required in the MBB.
2337 BitVector SpillMBBs(mf_->getNumBlockIDs());
2338 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2339 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2340 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2341 DenseMap<unsigned,unsigned> MBBVRegsMap;
2342 std::vector<LiveInterval*> NewLIs;
2343 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo::id.
2345 unsigned NumValNums = li.getNumValNums();
2346 SmallVector<MachineInstr*, 4> ReMatDefs;
2347 ReMatDefs.resize(NumValNums, NULL);
2348 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2349 ReMatOrigDefs.resize(NumValNums, NULL);
2350 SmallVector<int, 4> ReMatIds;
2351 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2352 BitVector ReMatDelete(NumValNums);
2353 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2355 // Spilling a split live interval. It cannot be split any further. Also,
2356 // it's guaranteed to be a single val# / range interval.
2357 if (vrm.getPreSplitReg(li.reg)) {
2358 vrm.setIsSplitFromReg(li.reg, 0);
2359 // Unset the split kill marker on the last use.
2360 LiveIndex KillIdx = vrm.getKillPoint(li.reg);
2361 if (KillIdx != LiveIndex()) {
2362 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2363 assert(KillMI && "Last use disappeared?");
2364 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2365 assert(KillOp != -1 && "Last use disappeared?");
2366 KillMI->getOperand(KillOp).setIsKill(false);
2368 vrm.removeKillPoint(li.reg);
// Reuse the pre-split interval's remat info and stack slot.
2369 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2370 Slot = vrm.getStackSlot(li.reg);
2371 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2372 MachineInstr *ReMatDefMI = DefIsReMat ?
2373 vrm.getReMaterializedMI(li.reg) : NULL;
2375 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2376 bool isLoad = isLoadSS ||
2377 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2378 bool IsFirstRange = true;
2379 for (LiveInterval::Ranges::const_iterator
2380 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2381 // If this is a split live interval with multiple ranges, it means there
2382 // are two-address instructions that re-defined the value. Only the
2383 // first def can be rematerialized!
2385 // Note ReMatOrigDefMI has already been deleted.
2386 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2387 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2388 false, vrm, rc, ReMatIds, loopInfo,
2389 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2390 MBBVRegsMap, NewLIs);
// Non-first ranges: rewrite without any rematerialization.
2392 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2393 Slot, 0, false, false, false,
2394 false, vrm, rc, ReMatIds, loopInfo,
2395 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2396 MBBVRegsMap, NewLIs);
2398 IsFirstRange = false;
2401 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-pre-split path from here on. Splitting only makes sense when the
// interval spans more than one MBB.
2405 bool TrySplit = !intervalIsInOneMBB(li);
2408 bool NeedStackSlot = false;
// First pass over val#s: decide which defs can be rematerialized and
// whether any value still needs a stack slot.
2409 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2411 const VNInfo *VNI = *i;
2412 unsigned VN = VNI->id;
2413 if (VNI->isUnused())
2414 continue; // Dead val#.
2415 // Is the def for the val# rematerializable?
2416 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2417 ? getInstructionFromIndex(VNI->def) : 0;
2419 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2420 // Remember how to remat the def of this val#.
2421 ReMatOrigDefs[VN] = ReMatDefMI;
2422 // Original def may be modified so we have to make a copy here.
2423 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2424 CloneMIs.push_back(Clone);
2425 ReMatDefs[VN] = Clone;
2427 bool CanDelete = true;
2428 if (VNI->hasPHIKill()) {
2429 // A kill is a phi node, not all of its uses can be rematerialized.
2430 // It must not be deleted.
2432 // Need a stack slot if there is any live range where uses cannot be
2434 NeedStackSlot = true;
2437 ReMatDelete.set(VN);
2439 // Need a stack slot if there is any live range where uses cannot be
2441 NeedStackSlot = true;
2445 // One stack slot per live interval.
2446 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2447 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2448 Slot = vrm.assignVirt2StackSlot(li.reg);
2450 // This case only occurs when the prealloc splitter has already assigned
2451 // a stack slot to this vreg.
2453 Slot = vrm.getStackSlot(li.reg);
2456 // Create new intervals and rewrite defs and uses.
2457 for (LiveInterval::Ranges::const_iterator
2458 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2459 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2460 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2461 bool DefIsReMat = ReMatDefMI != NULL;
2462 bool CanDelete = ReMatDelete[I->valno->id];
2464 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2465 bool isLoad = isLoadSS ||
2466 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2467 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2468 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2469 CanDelete, vrm, rc, ReMatIds, loopInfo,
2470 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2471 MBBVRegsMap, NewLIs);
2474 // Insert spills / restores if we are splitting.
2476 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Second phase: walk the recorded spill points and fold the store into
// the defining instruction where legal, otherwise register a spill point
// with the VirtRegMap.
2480 SmallPtrSet<LiveInterval*, 4> AddedKill;
2481 SmallVector<unsigned, 2> Ops;
2482 if (NeedStackSlot) {
2483 int Id = SpillMBBs.find_first();
2485 std::vector<SRInfo> &spills = SpillIdxes[Id];
2486 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2487 LiveIndex index = spills[i].index;
2488 unsigned VReg = spills[i].vreg;
2489 LiveInterval &nI = getOrCreateInterval(VReg);
2490 bool isReMat = vrm.isReMaterialized(VReg);
2491 MachineInstr *MI = getInstructionFromIndex(index);
2492 bool CanFold = false;
2493 bool FoundUse = false;
2495 if (spills[i].canFold) {
// Gather the operand indices of VReg in MI for the fold attempt.
2497 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2498 MachineOperand &MO = MI->getOperand(j);
2499 if (!MO.isReg() || MO.getReg() != VReg)
2506 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2507 RestoreMBBs, RestoreIdxes))) {
2508 // MI has two-address uses of the same register. If the use
2509 // isn't the first and only use in the BB, then we can't fold
2510 // it. FIXME: Move this to rewriteInstructionsForSpills.
2517 // Fold the store into the def if possible.
2518 bool Folded = false;
2519 if (CanFold && !Ops.empty()) {
2520 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2523 // Also folded uses, do not issue a load.
2524 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2525 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2527 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2531 // Otherwise tell the spiller to issue a spill.
2533 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2534 bool isKill = LR->end == getStoreIndex(index);
2535 if (!MI->registerDefIsDead(nI.reg))
2536 // No need to spill a dead def.
2537 vrm.addSpillPoint(VReg, isKill, MI);
2539 AddedKill.insert(&nI);
2542 Id = SpillMBBs.find_next(Id);
// Third phase: walk the recorded restore points and fold the load into
// the using instruction (or re-issue the remat def) where possible.
2546 int Id = RestoreMBBs.find_first();
2548 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2549 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2550 LiveIndex index = restores[i].index;
// An invalidated restore (see eraseRestoreInfo) is skipped.
2551 if (index == LiveIndex())
2553 unsigned VReg = restores[i].vreg;
2554 LiveInterval &nI = getOrCreateInterval(VReg);
2555 bool isReMat = vrm.isReMaterialized(VReg);
2556 MachineInstr *MI = getInstructionFromIndex(index);
2557 bool CanFold = false;
2559 if (restores[i].canFold) {
2561 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2562 MachineOperand &MO = MI->getOperand(j);
2563 if (!MO.isReg() || MO.getReg() != VReg)
2567 // If this restore were to be folded, it would have been folded
2576 // Fold the load into the use if possible.
2577 bool Folded = false;
2578 if (CanFold && !Ops.empty()) {
// Not rematerializable: try folding the stack-slot load itself.
2580 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerializable def: if the remat instruction is itself a load,
// try folding that instead of reloading from the stack slot.
2582 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2584 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2585 // If the rematerializable def is a load, also try to fold it.
2586 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2587 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2588 Ops, isLoadSS, LdSlot, VReg);
2590 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2592 // Re-matting an instruction with virtual register use. Add the
2593 // register as an implicit use on the use MI and update the register
2594 // interval's spill weight to HUGE_VALF to prevent it from being
2596 LiveInterval &ImpLi = getInterval(ImpUse);
2597 ImpLi.weight = HUGE_VALF;
2598 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2603 // If folding is not possible / failed, then tell the spiller to issue a
2604 // load / rematerialization for us.
2606 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2608 vrm.addRestorePoint(VReg, MI);
2610 Id = RestoreMBBs.find_next(Id);
2613 // Finalize intervals: add kills, finalize spill weights, and filter out
// Normalize each new interval's weight by its approximate size, and set
// a kill flag on the last use of any interval that has no spill store
// (so the register is known dead afterwards).
2615 std::vector<LiveInterval*> RetNewLIs;
2616 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2617 LiveInterval *LI = NewLIs[i];
2619 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2620 if (!AddedKill.count(LI)) {
2621 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2622 LiveIndex LastUseIdx = getBaseIndex(LR->end);
2623 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2624 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2625 assert(UseIdx != -1);
// Two-address tied uses cannot carry a kill marker.
2626 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2627 LastUse->getOperand(UseIdx).setIsKill();
2628 vrm.addKillPoint(LI->reg, LastUseIdx);
2631 RetNewLIs.push_back(LI);
2635 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2639 /// hasAllocatableSuperReg - Return true if the specified physical register has
2640 /// any super register that's allocatable.
/// NOTE(review): the return statements are missing from this excerpt.
2641 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A qualifying super-register must be both allocatable and tracked by an
// interval in this analysis.
2642 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2643 if (allocatableRegs_[*AS] && hasInterval(*AS))
2648 /// getRepresentativeReg - Find the largest super register of the specified
2649 /// physical register.
/// NOTE(review): excerpt is missing the loop body that updates BestReg and
/// the final return; only the selection condition is visible.
2650 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2651 // Find the largest super-register that is allocatable.
2652 unsigned BestReg = Reg;
2653 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2654 unsigned SuperReg = *AS;
// A super-register with an interval but no allocatable super of its own
// is the topmost candidate.
2655 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2663 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2664 /// specified interval that conflict with the specified physical register.
/// NOTE(review): the counter increment inside the if is missing from this
/// excerpt; only the conflict test is visible.
2665 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2666 unsigned PhysReg) const {
2667 unsigned NumConflicts = 0;
// Compare against the interval of PhysReg's representative (largest)
// super-register, so aliasing overlaps are counted too.
2668 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Every operand referencing li.reg is a potential conflict point.
2669 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2670 E = mri_->reg_end(); I != E; ++I) {
2671 MachineOperand &O = I.getOperand();
2672 MachineInstr *MI = O.getParent();
2673 LiveIndex Index = getInstructionIndex(MI);
// Conflict whenever the physreg's interval is live at this instruction.
2674 if (pli.liveAt(Index))
2677 return NumConflicts;
2680 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2681 /// around all defs and uses of the specified interval. Return true if it
2682 /// was able to cut its interval.
/// NOTE(review): excerpt is missing intervening lines (the 'continue' after
/// SeenMIs, the std::string msg declaration, else branches, returns);
/// comments cover only the visible logic.
2683 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2684 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the largest allocatable super-register so all aliases are covered.
2685 unsigned SpillReg = getRepresentativeReg(PhysReg);
2687 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2688 // If there are registers which alias PhysReg, but which are not a
2689 // sub-register of the chosen representative super register. Assert
2690 // since we can't handle it yet.
2691 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2692 tri_->isSuperRegister(*AS, SpillReg));
2695 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs dedupes instructions reached via multiple operands of li.reg.
2696 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2697 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2698 E = mri_->reg_end(); I != E; ++I) {
2699 MachineOperand &O = I.getOperand();
2700 MachineInstr *MI = O.getParent();
2701 if (SeenMIs.count(MI))
2704 LiveIndex Index = getInstructionIndex(MI);
// Where the physreg is live across this instruction, request an
// emergency spill and carve the instruction's slot range out of it.
2705 if (pli.liveAt(Index)) {
2706 vrm.addEmergencySpill(SpillReg, MI);
2707 LiveIndex StartIdx = getLoadIndex(Index);
2708 LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
2709 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2710 pli.removeRange(StartIdx, EndIdx);
// The range straddles live-range boundaries: genuinely out of
// registers. Report a fatal error (inline asm constraints are the
// common culprit, so print the offending statement).
2714 raw_string_ostream Msg(msg);
2715 Msg << "Ran out of registers during register allocation!";
2716 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2717 Msg << "\nPlease check your inline asm statement for invalid "
2718 << "constraints:\n";
2719 MI->print(Msg, tm_);
2721 llvm_report_error(Msg.str());
// Also carve the same slot range out of any live sub-register intervals.
2723 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2724 if (!hasInterval(*AS))
2726 LiveInterval &spli = getInterval(*AS);
2727 if (spli.liveAt(Index))
2728 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2735 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2736 MachineInstr* startInst) {
2737 LiveInterval& Interval = getOrCreateInterval(reg);
2738 VNInfo* VN = Interval.getNextValue(
2739 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2740 startInst, true, getVNInfoAllocator());
2741 VN->setHasPHIKill(true);
2742 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2744 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2745 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2746 Interval.addRange(LR);