1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
48 // Hidden options for help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
54 static cl::opt<bool> EnableFastSpilling("fast-spill",
55 cl::init(false), cl::Hidden);
57 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
59 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
62 STATISTIC(numIntervals , "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
65 STATISTIC(numCoalescing, "Number of early coalescing performed");
67 char LiveIntervals::ID = 0;
68 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass depends on and preserves:
// AliasAnalysis and LiveVariables are both required and preserved; loop info,
// dominators and PHI elimination are preserved; PHI elimination and the
// two-address lowering pass must run first.
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state computed by the last run:
// the register-to-interval map, terminator gaps, PHI-join copies, the
// VNInfo allocator, and any cloned machine instructions.
88 void LiveIntervals::releaseMemory() {
89 // Free the live intervals themselves.
90 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
91 E = r2iMap_.end(); I != E; ++I)
99 terminatorGaps.clear();
100 phiJoinCopies.clear();
102 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
103 VNInfoAllocator.Reset();
// Return every cloned instruction's storage to the MachineFunction.
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
107 mf_->DeleteMachineInstr(MI);
// CanTurnIntoImplicitDef - Return true when the use of Reg at operand OpIdx
// of MI is just a copy of Reg: a target move instruction, the operand-2
// source of a SUBREG_TO_REG, or the operand-1 source of an EXTRACT_SUBREG.
// Such an instruction can be rewritten as an IMPLICIT_DEF of its result.
111 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
112 unsigned OpIdx, const TargetInstrInfo *tii_){
113 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
114 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
// SUBREG_TO_REG reads its register source at operand 2,
// EXTRACT_SUBREG at operand 1.
118 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
120 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
125 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
126 /// there is one implicit_def for each use. Add isUndef marker to
127 /// implicit_def defs and their uses.
128 void LiveIntervals::processImplicitDefs() {
129 SmallSet<unsigned, 8> ImpDefRegs;
130 SmallVector<MachineInstr*, 8> ImpDefMIs;
131 MachineBasicBlock *Entry = mf_->begin();
132 SmallPtrSet<MachineBasicBlock*,16> Visited;
// Walk the CFG depth-first from the entry block so defs are seen before
// (most) uses; ImpDefRegs tracks registers currently defined by an
// IMPLICIT_DEF, ImpDefMIs the instructions themselves.
133 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
134 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
136 MachineBasicBlock *MBB = *DFI;
137 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
139 MachineInstr *MI = &*I;
141 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
142 unsigned Reg = MI->getOperand(0).getReg();
143 ImpDefRegs.insert(Reg);
// A physreg IMPLICIT_DEF also implicitly defines all of its sub-registers.
144 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
145 for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
146 ImpDefRegs.insert(*SS);
148 ImpDefMIs.push_back(MI);
152 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
153 MachineOperand &MO = MI->getOperand(2);
154 if (ImpDefRegs.count(MO.getReg())) {
155 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
156 // This is an identity copy, eliminate it now.
158 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
161 MI->eraseFromParent();
// Scan the remaining uses in this instruction for implicit_def'd registers.
166 bool ChangedToImpDef = false;
167 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
168 MachineOperand& MO = MI->getOperand(i);
169 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
171 unsigned Reg = MO.getReg();
174 if (!ImpDefRegs.count(Reg))
176 // Use is a copy, just turn it into an implicit_def.
177 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
178 bool isKill = MO.isKill();
179 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip every operand but the def (iterate backwards so indices stay valid).
180 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
181 MI->RemoveOperand(j);
183 ImpDefRegs.erase(Reg)
184 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
187 ChangedToImpDef = true;
// A kill or a tied use ends the "undefined" region for Reg, unless a later
// operand of this same instruction uses it again.
192 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
193 // Make sure other uses of
194 for (unsigned j = i+1; j != e; ++j) {
195 MachineOperand &MOJ = MI->getOperand(j);
196 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
199 ImpDefRegs.erase(Reg);
203 if (ChangedToImpDef) {
204 // Backtrack to process this new implicit_def.
// Otherwise any register this instruction defines is no longer implicit_def.
207 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
208 MachineOperand& MO = MI->getOperand(i);
209 if (!MO.isReg() || !MO.isDef())
211 ImpDefRegs.erase(MO.getReg());
216 // Any outstanding liveout implicit_def's?
217 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
218 MachineInstr *MI = ImpDefMIs[i];
219 unsigned Reg = MI->getOperand(0).getReg();
220 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
221 !ImpDefRegs.count(Reg)) {
222 // Delete all "local" implicit_def's. That include those which define
223 // physical registers since they cannot be liveout.
224 MI->eraseFromParent();
228 // If there are multiple defs of the same register and at least one
229 // is not an implicit_def, do not insert implicit_def's before the
232 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
233 DE = mri_->def_end(); DI != DE; ++DI) {
234 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
242 // The only implicit_def which we want to keep are those that are live
244 MI->eraseFromParent();
// Rewrite remaining uses of Reg: copies become IMPLICIT_DEFs; other uses
// get a fresh virtual register of the same class.
246 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
247 UE = mri_->use_end(); UI != UE; ) {
248 MachineOperand &RMO = UI.getOperand();
249 MachineInstr *RMI = &*UI;
251 MachineBasicBlock *RMBB = RMI->getParent();
255 // Turn a copy use into an implicit_def.
256 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
257 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
259 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
260 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
261 RMI->RemoveOperand(j);
265 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
266 unsigned NewVReg = mri_->createVirtualRegister(RC);
// computeNumbering - Assign a MachineInstrIndex to every instruction and
// basic block, rebuilding mi2iMap_/i2miMap_/MBB2IdxMap/Idx2MBBMap and the
// per-block terminator gaps. If an old numbering existed, every live range,
// VNInfo def and kill index is remapped onto the new numbering.
278 void LiveIntervals::computeNumbering() {
// Keep the old maps so existing indices can be translated below.
279 Index2MiMap OldI2MI = i2miMap_;
280 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
286 terminatorGaps.clear();
287 phiJoinCopies.clear();
291 // Number MachineInstrs and MachineBasicBlocks.
292 // Initialize MBB indexes to a sentinal.
293 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
294 std::make_pair(MachineInstrIndex(),MachineInstrIndex()))
296 MachineInstrIndex MIIndex;
297 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
299 MachineInstrIndex StartIdx = MIIndex;
301 // Insert an empty slot at the beginning of each block.
302 MIIndex = getNextIndex(MIIndex);
303 i2miMap_.push_back(0);
305 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
308 if (I == MBB->getFirstTerminator()) {
309 // Leave a gap for before terminators, this is where we will point
311 MachineInstrIndex tGap(true, MIIndex);
313 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
315 "Multiple 'first' terminators encountered during numbering.");
316 inserted = inserted; // Avoid compiler warning if assertions turned off.
317 i2miMap_.push_back(0);
319 MIIndex = getNextIndex(MIIndex);
322 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
323 assert(inserted && "multiple MachineInstr -> index mappings");
325 i2miMap_.push_back(I);
326 MIIndex = getNextIndex(MIIndex);
329 // Insert max(1, numdefs) empty slots after every instruction.
330 unsigned Slots = I->getDesc().getNumDefs();
334 MIIndex = getNextIndex(MIIndex);
335 i2miMap_.push_back(0);
// A block with no terminator still needs a terminator gap at its end.
340 if (MBB->getFirstTerminator() == MBB->end()) {
341 // Leave a gap for before terminators, this is where we will point
343 MachineInstrIndex tGap(true, MIIndex);
345 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
347 "Multiple 'first' terminators encountered during numbering.");
348 inserted = inserted; // Avoid compiler warning if assertions turned off.
349 i2miMap_.push_back(0);
351 MIIndex = getNextIndex(MIIndex);
354 // Set the MBB2IdxMap entry for this MBB.
355 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
356 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
359 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Remap existing intervals from the old numbering to the new one.
361 if (!OldI2MI.empty())
362 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
363 for (LiveInterval::iterator LI = OI->second->begin(),
364 LE = OI->second->end(); LI != LE; ++LI) {
366 // Remap the start index of the live range to the corresponding new
367 // number, or our best guess at what it _should_ correspond to if the
368 // original instruction has been erased. This is either the following
369 // instruction or its predecessor.
370 unsigned index = LI->start.getVecIndex();
371 MachineInstrIndex::Slot offset = LI->start.getSlot();
372 if (LI->start.isLoad()) {
373 std::vector<IdxMBBPair>::const_iterator I =
374 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
375 // Take the pair containing the index
376 std::vector<IdxMBBPair>::const_iterator J =
377 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
379 LI->start = getMBBStartIdx(J->second);
381 LI->start = MachineInstrIndex(
382 MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
383 (MachineInstrIndex::Slot)offset);
386 // Remap the ending index in the same way that we remapped the start,
387 // except for the final step where we always map to the immediately
388 // following instruction.
389 index = (getPrevSlot(LI->end)).getVecIndex();
390 offset = LI->end.getSlot();
391 if (LI->end.isLoad()) {
392 // VReg dies at end of block.
393 std::vector<IdxMBBPair>::const_iterator I =
394 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
397 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Skip over erased instructions to find the next surviving one.
399 unsigned idx = index;
400 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
402 if (index != OldI2MI.size())
404 MachineInstrIndex(mi2iMap_[OldI2MI[index]],
405 (idx == index ? offset : MachineInstrIndex::LOAD));
408 MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
// Remap value-number def and kill indices the same way.
412 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
413 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
416 // Remap the VNInfo def index, which works the same as the
417 // start indices above. VN's with special sentinel defs
418 // don't need to be remapped.
419 if (vni->isDefAccurate() && !vni->isUnused()) {
420 unsigned index = vni->def.getVecIndex();
421 MachineInstrIndex::Slot offset = vni->def.getSlot();
422 if (vni->def.isLoad()) {
423 std::vector<IdxMBBPair>::const_iterator I =
424 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
425 // Take the pair containing the index
426 std::vector<IdxMBBPair>::const_iterator J =
427 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
429 vni->def = getMBBStartIdx(J->second);
431 vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
435 // Remap the VNInfo kill indices, which works the same as
436 // the end indices above.
437 for (size_t i = 0; i < vni->kills.size(); ++i) {
438 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
439 MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
441 if (vni->kills[i].isLoad()) {
// NOTE(review): assert on a string literal is always true and never fires;
// this was presumably meant to be assert(false && "Value killed at a load
// slot.") — confirm before relying on it to catch this case.
442 assert("Value killed at a load slot.");
443 /*std::vector<IdxMBBPair>::const_iterator I =
444 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
447 vni->kills[i] = getMBBEndIdx(I->second);*/
449 if (vni->kills[i].isPHIIndex()) {
450 std::vector<IdxMBBPair>::const_iterator I =
451 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
453 vni->kills[i] = terminatorGaps[I->second];
455 assert(OldI2MI[index] != 0 &&
456 "Kill refers to instruction not present in index maps.");
457 vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
461 unsigned idx = index;
462 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
464 if (index != OldI2MI.size())
465 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
466 (idx == index ? offset : 0);
468 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// scaleNumbering - Multiply every instruction/block index by `factor`,
// spreading the numbering out to make room for new instructions. Updates
// MBB index pairs, terminator gaps, all live intervals, and both the
// MI->index and index->MI maps.
476 void LiveIntervals::scaleNumbering(int factor) {
478 // * scale MBB begin and end points
479 // * scale all ranges.
480 // * Update VNI structures.
481 // * Scale instruction numberings
483 // Scale the MBB indices.
485 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
486 MBB != MBBE; ++MBB) {
487 std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
488 mbbIndices.first = mbbIndices.first.scale(factor);
489 mbbIndices.second = mbbIndices.second.scale(factor);
490 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
492 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
494 // Scale terminator gaps.
495 for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
496 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
498 terminatorGaps[TGI->first] = TGI->second.scale(factor);
501 // Scale the intervals.
502 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
503 LI->second->scaleNumbering(factor);
506 // Scale MachineInstrs.
// Work from a copy of the old map while writing scaled slots back, and
// track the largest new slot so i2miMap_ can be resized to fit.
507 Mi2IndexMap oldmi2iMap = mi2iMap_;
508 MachineInstrIndex highestSlot;
509 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
511 MachineInstrIndex newSlot = MI->second.scale(factor);
512 mi2iMap_[MI->first] = newSlot;
513 highestSlot = std::max(highestSlot, newSlot);
516 unsigned highestVIndex = highestSlot.getVecIndex();
518 i2miMap_.resize(highestVIndex + 1);
// Rebuild the reverse (index -> instruction) map from the scaled slots.
519 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
521 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
527 /// runOnMachineFunction - Compute live intervals for the whole function:
527 /// cache target/register info and required analyses, normalize
527 /// IMPLICIT_DEFs, then build and coalesce the intervals.
529 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
531 mri_ = &mf_->getRegInfo();
532 tm_ = &fn.getTarget();
533 tri_ = tm_->getRegisterInfo();
534 tii_ = tm_->getInstrInfo();
535 aa_ = &getAnalysis<AliasAnalysis>();
536 lv_ = &getAnalysis<LiveVariables>();
537 allocatableRegs_ = tri_->getAllocatableSet(fn);
539 processImplicitDefs();
542 performEarlyCoalescing();
// Update the statistic with the number of intervals built for this function.
544 numIntervals += getNumIntervals();
550 /// print - Implement the dump method: write every live interval,
550 /// using the target register info for register names.
551 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
552 OS << "********** INTERVALS **********\n";
553 for (const_iterator I = begin(), E = end(); I != E; ++I) {
554 I->second->print(OS, tri_);
// printInstrs - Print every machine instruction, grouped by basic block
// (labelled with its IR block name) and prefixed with its numbering index.
561 void LiveIntervals::printInstrs(raw_ostream &OS) const {
562 OS << "********** MACHINEINSTRS **********\n";
564 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
565 mbbi != mbbe; ++mbbi) {
566 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
567 for (MachineBasicBlock::iterator mii = mbbi->begin(),
568 mie = mbbi->end(); mii != mie; ++mii) {
569 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debugging convenience wrapper around printInstrs.
574 void LiveIntervals::dumpInstrs() const {
578 /// conflictsWithPhysRegDef - Returns true if the specified register
579 /// is defined during the duration of the specified interval.
580 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
581 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by each range of the interval.
582 for (LiveInterval::Ranges::const_iterator
583 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
584 for (MachineInstrIndex index = getBaseIndex(I->start),
585 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
586 index = getNextIndex(index)) {
587 // skip deleted instructions
588 while (index != end && !getInstructionFromIndex(index))
589 index = getNextIndex(index);
590 if (index == end) break;
592 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from the interval's own register are not conflicts.
593 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
594 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
595 if (SrcReg == li.reg || DstReg == li.reg)
597 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
598 MachineOperand& mop = MI->getOperand(i);
601 unsigned PhysReg = mop.getReg();
602 if (PhysReg == 0 || PhysReg == li.reg)
// For virtual registers, check the physreg they are mapped to (if any).
604 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
605 if (!vrm.hasPhys(PhysReg))
607 PhysReg = vrm.getPhys(PhysReg);
609 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
618 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
619 /// it can check use as well.
620 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
621 unsigned Reg, bool CheckUse,
622 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Walk every instruction slot covered by each range of the interval.
623 for (LiveInterval::Ranges::const_iterator
624 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
625 for (MachineInstrIndex index = getBaseIndex(I->start),
626 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
627 index = getNextIndex(index)) {
628 // Skip deleted instructions.
629 MachineInstr *MI = 0;
630 while (index != end) {
631 MI = getInstructionFromIndex(index);
634 index = getNextIndex(index);
636 if (index == end) break;
// Copies already joined by the coalescer are ignored.
638 if (JoinedCopies.count(MI))
640 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
641 MachineOperand& MO = MI->getOperand(i);
// Uses only count when the caller asked for them (CheckUse).
644 if (MO.isUse() && !CheckUse)
646 unsigned PhysReg = MO.getReg();
647 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
649 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Print a register to errs(): physical registers by their
// target name, virtual registers in "%reg<N>" form.
659 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
660 if (TargetRegisterInfo::isPhysicalRegister(reg))
661 errs() << tri_->getName(reg);
663 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register by operand MO of instruction mi (at index MIIdx). Handles three
// cases: the first def of the vreg, a redef from two-address lowering, and a
// redef produced by PHI elimination.
667 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
668 MachineBasicBlock::iterator mi,
669 MachineInstrIndex MIIdx,
672 LiveInterval &interval) {
674 errs() << "\t\tregister: ";
675 printRegName(interval.reg, tri_);
678 // Virtual registers may be defined multiple times (due to phi
679 // elimination and 2-addr elimination). Much of what we do only has to be
680 // done once for the vreg. We use an empty interval to detect the first
681 // time we see a vreg.
682 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
683 if (interval.empty()) {
684 // Get the Idx of the defining instructions.
685 MachineInstrIndex defIndex = getDefIndex(MIIdx);
686 // Earlyclobbers move back one, so that they overlap the live range
688 if (MO.isEarlyClobber())
689 defIndex = getUseIndex(MIIdx);
// Record the defining copy instruction (if any) on the value number.
691 MachineInstr *CopyMI = NULL;
692 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
693 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
694 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
695 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
696 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
698 // Earlyclobbers move back one.
699 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
701 assert(ValNo->id == 0 && "First value in interval is not 0?");
703 // Loop over all of the blocks that the vreg is defined in. There are
704 // two cases we have to handle here. The most common case is a vreg
705 // whose lifetime is contained within a basic block. In this case there
706 // will be a single kill, in MBB, which comes after the definition.
707 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
708 // FIXME: what about dead vars?
709 MachineInstrIndex killIdx;
710 if (vi.Kills[0] != mi)
711 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
712 else if (MO.isEarlyClobber())
713 // Earlyclobbers that die in this instruction move up one extra, to
714 // compensate for having the starting point moved back one. This
715 // gets them to overlap the live range of other outputs.
716 killIdx = getNextSlot(getNextSlot(defIndex));
718 killIdx = getNextSlot(defIndex);
720 // If the kill happens after the definition, we have an intra-block
722 if (killIdx > defIndex) {
723 assert(vi.AliveBlocks.empty() &&
724 "Shouldn't be alive across any blocks!");
725 LiveRange LR(defIndex, killIdx, ValNo);
726 interval.addRange(LR);
727 DEBUG(errs() << " +" << LR << "\n");
728 ValNo->addKill(killIdx);
733 // The other case we handle is when a virtual register lives to the end
734 // of the defining block, potentially live across some blocks, then is
735 // live into some number of blocks, but gets killed. Start by adding a
736 // range that goes from this definition to the end of the defining block.
737 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
738 DEBUG(errs() << " +" << NewLR);
739 interval.addRange(NewLR);
741 // Iterate over all of the blocks that the variable is completely
742 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
744 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
745 E = vi.AliveBlocks.end(); I != E; ++I) {
746 LiveRange LR(getMBBStartIdx(*I),
747 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
749 interval.addRange(LR);
750 DEBUG(errs() << " +" << LR);
753 // Finally, this virtual register is live from the start of any killing
754 // block to the 'use' slot of the killing instruction.
755 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
756 MachineInstr *Kill = vi.Kills[i];
757 MachineInstrIndex killIdx =
758 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
759 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
760 interval.addRange(LR);
761 ValNo->addKill(killIdx);
762 DEBUG(errs() << " +" << LR);
766 // If this is the second time we see a virtual register definition, it
767 // must be due to phi elimination or two addr elimination. If this is
768 // the result of two address elimination, then the vreg is one of the
769 // def-and-use register operand.
770 if (mi->isRegTiedToUseOperand(MOIdx)) {
771 // If this is a two-address definition, then we have already processed
772 // the live range. The only problem is that we didn't realize there
773 // are actually two values in the live interval. Because of this we
774 // need to take the LiveRegion that defines this register and split it
776 assert(interval.containsOneValue());
777 MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
778 MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
779 if (MO.isEarlyClobber())
780 RedefIndex = getUseIndex(MIIdx);
782 const LiveRange *OldLR =
783 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
784 VNInfo *OldValNo = OldLR->valno;
786 // Delete the initial value, which should be short and continuous,
787 // because the 2-addr copy must be in the same MBB as the redef.
788 interval.removeRange(DefIndex, RedefIndex);
790 // Two-address vregs should always only be redefined once. This means
791 // that at this point, there should be exactly one value number in it.
792 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
794 // The new value number (#1) is defined by the instruction we claimed
796 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
797 false, // update at *
799 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
801 // Value#0 is now defined by the 2-addr instruction.
802 OldValNo->def = RedefIndex;
803 OldValNo->setCopy(0);
804 if (MO.isEarlyClobber())
805 OldValNo->setHasRedefByEC(true);
807 // Add the new live interval which replaces the range for the input copy.
808 LiveRange LR(DefIndex, RedefIndex, ValNo);
809 DEBUG(errs() << " replace range with " << LR);
810 interval.addRange(LR);
811 ValNo->addKill(RedefIndex);
813 // If this redefinition is dead, we need to add a dummy unit live
814 // range covering the def slot.
817 LiveRange(RedefIndex, MO.isEarlyClobber() ?
818 getNextSlot(getNextSlot(RedefIndex)) :
819 getNextSlot(RedefIndex), OldValNo));
822 errs() << " RESULT: ";
823 interval.print(errs(), tri_);
826 // Otherwise, this must be because of phi elimination. If this is the
827 // first redefinition of the vreg that we have seen, go back and change
828 // the live range in the PHI block to be a different value number.
829 if (interval.containsOneValue()) {
830 // Remove the old range that we now know has an incorrect number.
831 VNInfo *VNI = interval.getValNumInfo(0);
832 MachineInstr *Killer = vi.Kills[0];
833 phiJoinCopies.push_back(Killer);
834 MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
835 MachineInstrIndex End =
836 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
838 errs() << " Removing [" << Start << "," << End << "] from: ";
839 interval.print(errs(), tri_);
842 interval.removeRange(Start, End);
843 assert(interval.ranges.size() == 1 &&
844 "Newly discovered PHI interval has >1 ranges.");
// The surviving value is killed at the terminator gap of its last block.
845 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
846 VNI->addKill(terminatorGaps[killMBB]);
847 VNI->setHasPHIKill(true);
849 errs() << " RESULT: ";
850 interval.print(errs(), tri_);
853 // Replace the interval with one of a NEW value number. Note that this
854 // value number isn't actually defined by an instruction, weird huh? :)
855 LiveRange LR(Start, End,
856 interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
857 0, false, VNInfoAllocator));
858 LR.valno->setIsPHIDef(true);
859 DEBUG(errs() << " replace range with " << LR);
860 interval.addRange(LR);
861 LR.valno->addKill(End);
863 errs() << " RESULT: ";
864 interval.print(errs(), tri_);
868 // In the case of PHI elimination, each variable definition is only
869 // live until the end of the block. We've already taken care of the
870 // rest of the live range.
871 MachineInstrIndex defIndex = getDefIndex(MIIdx);
872 if (MO.isEarlyClobber())
873 defIndex = getUseIndex(MIIdx);
// Record the defining copy instruction (if any) on the new value number.
876 MachineInstr *CopyMI = NULL;
877 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
878 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
879 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
880 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
881 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
883 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
885 MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
886 LiveRange LR(defIndex, killIndex, ValNo);
887 interval.addRange(LR);
888 ValNo->addKill(terminatorGaps[mbb]);
889 ValNo->setHasPHIKill(true);
890 DEBUG(errs() << " +" << LR);
894 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Add a live range for a physical-register def
// by operand MO of `mi` (at MIIdx). The range ends at the kill, at a redef,
// or immediately after the def if the register is dead. CopyMI, if non-null,
// is the copy instruction that defines this value.
897 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
898 MachineBasicBlock::iterator mi,
899 MachineInstrIndex MIIdx,
901 LiveInterval &interval,
902 MachineInstr *CopyMI) {
903 // A physical register cannot be live across basic block, so its
904 // lifetime must end somewhere in its defining basic block.
906 errs() << "\t\tregister: ";
907 printRegName(interval.reg, tri_);
910 MachineInstrIndex baseIndex = MIIdx;
911 MachineInstrIndex start = getDefIndex(baseIndex);
912 // Earlyclobbers move back one.
913 if (MO.isEarlyClobber())
914 start = getUseIndex(MIIdx);
915 MachineInstrIndex end = start;
917 // If it is not used after definition, it is considered dead at
918 // the instruction defining it. Hence its interval is:
919 // [defSlot(def), defSlot(def)+1)
920 // For earlyclobbers, the defSlot was pushed back one; the extra
921 // advance below compensates.
923 DEBUG(errs() << " dead");
924 if (MO.isEarlyClobber())
925 end = getNextSlot(getNextSlot(start));
927 end = getNextSlot(start);
931 // If it is not dead on definition, it must be killed by a
932 // subsequent instruction. Hence its interval is:
933 // [defSlot(def), useSlot(kill)+1)
934 baseIndex = getNextIndex(baseIndex);
// Scan forward in the block for the instruction that kills or redefines it.
935 while (++mi != MBB->end()) {
936 while (baseIndex.getVecIndex() < i2miMap_.size() &&
937 getInstructionFromIndex(baseIndex) == 0)
938 baseIndex = getNextIndex(baseIndex);
939 if (mi->killsRegister(interval.reg, tri_)) {
940 DEBUG(errs() << " killed");
941 end = getNextSlot(getUseIndex(baseIndex));
944 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
946 if (mi->isRegTiedToUseOperand(DefIdx)) {
947 // Two-address instruction.
948 end = getDefIndex(baseIndex);
949 if (mi->getOperand(DefIdx).isEarlyClobber())
950 end = getUseIndex(baseIndex);
952 // Another instruction redefines the register before it is ever read.
953 // Then the register is essentially dead at the instruction that defines
954 // it. Hence its interval is:
955 // [defSlot(def), defSlot(def)+1)
956 DEBUG(errs() << " dead");
957 end = getNextSlot(start);
963 baseIndex = getNextIndex(baseIndex);
966 // The only case we should have a dead physreg here without a killing or
967 // instruction where we know it's dead is if it is live-in to the function
968 // and never used. Another possible case is the implicit use of the
969 // physical register has been deleted by two-address pass.
970 end = getNextSlot(start);
973 assert(start < end && "did not find end of interval?");
975 // Already exists? Extend old live interval.
976 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
977 bool Extend = OldLR != interval.end();
978 VNInfo *ValNo = Extend
979 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
980 if (MO.isEarlyClobber() && Extend)
981 ValNo->setHasRedefByEC(true);
982 LiveRange LR(start, end, ValNo);
983 interval.addRange(LR);
984 LR.valno->addKill(end);
985 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler; allocatable physreg defs also get intervals
// for each of their sub-registers.
988 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
989 MachineBasicBlock::iterator MI,
990 MachineInstrIndex MIIdx,
993 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
994 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
995 getOrCreateInterval(MO.getReg()));
996 else if (allocatableRegs_[MO.getReg()]) {
// Record the defining copy instruction (if any) for the physreg value.
997 MachineInstr *CopyMI = NULL;
998 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
999 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
1000 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1001 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
1002 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1004 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1005 getOrCreateInterval(MO.getReg()), CopyMI);
1006 // Def of a register also defines its sub-registers.
1007 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1008 // If MI also modifies the sub-register explicitly, avoid processing it
1009 // more than once. Do not pass in TRI here so it checks for exact match.
1010 if (!MI->modifiesRegister(*AS))
1011 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1012 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is
// live-in to MBB: scan forward from MIIdx for the first kill or
// redefinition, then create a PHI-def value number covering [start, end).
// NOTE(review): interior lines are missing from this excerpt (internal
// numbering jumps) — the loop-exit and SeenDefUse logic is only partially
// visible; confirm against the full file.
1016 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1017 MachineInstrIndex MIIdx,
1018 LiveInterval &interval, bool isAlias) {
1020 errs() << "\t\tlivein register: ";
1021 printRegName(interval.reg, tri_);
1024 // Look for kills, if it reaches a def before it's killed, then it shouldn't
1025 // be considered a livein.
1026 MachineBasicBlock::iterator mi = MBB->begin();
1027 MachineInstrIndex baseIndex = MIIdx;
1028 MachineInstrIndex start = baseIndex;
// Skip over index slots with no instruction mapped to them.
1029 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1030 getInstructionFromIndex(baseIndex) == 0)
1031 baseIndex = getNextIndex(baseIndex);
1032 MachineInstrIndex end = baseIndex;
1033 bool SeenDefUse = false;
1035 while (mi != MBB->end()) {
1036 if (mi->killsRegister(interval.reg, tri_)) {
// Killed: range ends just past the use slot of the killing instruction.
1037 DEBUG(errs() << " killed");
1038 end = getNextSlot(getUseIndex(baseIndex));
1041 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1042 // Another instruction redefines the register before it is ever read.
1043 // Then the register is essentially dead at the instruction that defines
1044 // it. Hence its interval is:
1045 // [defSlot(def), defSlot(def)+1)
1046 DEBUG(errs() << " dead");
1047 end = getNextSlot(getDefIndex(start));
1052 baseIndex = getNextIndex(baseIndex);
1054 if (mi != MBB->end()) {
1055 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1056 getInstructionFromIndex(baseIndex) == 0)
1057 baseIndex = getNextIndex(baseIndex);
1061 // Live-in register might not be used at all.
1064 DEBUG(errs() << " dead");
1065 end = getNextSlot(getDefIndex(MIIdx));
1067 DEBUG(errs() << " live through");
// The live-in value is defined "by the block" rather than an instruction,
// so it is marked as a PHI definition with no defining MachineInstr.
1073 interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
1074 0, false, VNInfoAllocator);
1075 vni->setIsPHIDef(true);
1076 LiveRange LR(start, end, vni);
1078 interval.addRange(LR);
1079 LR.valno->addKill(end);
1080 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Classify every copy reading SrcInt.reg as either
// an identity copy (its source is DstInt.reg) or a conflicting copy, and
// report whether identity copies dominate. Fills IdentCopies/OtherCopies
// for the caller.
// NOTE(review): lines are missing from this excerpt (e.g. the HaveConflict
// early-return path around internal line 1109) — confirm against the full
// file.
1084 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1085 SmallVector<MachineInstr*,16> &IdentCopies,
1086 SmallVector<MachineInstr*,16> &OtherCopies) {
1087 bool HaveConflict = false;
1088 unsigned NumIdent = 0;
1089 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
1090 re = mri_->reg_end(); ri != re; ++ri) {
1091 MachineOperand &O = ri.getOperand();
1095 MachineInstr *MI = &*ri;
1096 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1097 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
// A copy whose source is not the phi destination conflicts if the
// destination interval is live at that point.
1099 if (SrcReg != DstInt.reg) {
1100 OtherCopies.push_back(MI);
1101 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1103 IdentCopies.push_back(MI);
1109 return false; // Let coalescer handle it
1110 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - Eliminate copies feeding phi joins when
// profitable: redirect non-identity copies to the phi destination, erase
// identity copies, then remove the phi-join move itself and merge the
// source interval's range into the destination.
// NOTE(review): this excerpt is missing interior lines (internal numbering
// jumps throughout) — loop/branch structure is only partially visible;
// confirm against the full file before editing.
1113 void LiveIntervals::performEarlyCoalescing() {
1114 if (!EarlyCoalescing)
1117 /// Perform early coalescing: eliminate copies which feed into phi joins
1118 /// and whose sources are defined by the phi joins.
1119 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1120 MachineInstr *Join = phiJoinCopies[i];
// Honor the debugging limit on how many joins to coalesce.
1121 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1124 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1125 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1127 assert(isMove && "PHI join instruction must be a move!");
1132 LiveInterval &DstInt = getInterval(PHIDst);
1133 LiveInterval &SrcInt = getInterval(PHISrc);
1134 SmallVector<MachineInstr*, 16> IdentCopies;
1135 SmallVector<MachineInstr*, 16> OtherCopies;
1136 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1139 DEBUG(errs() << "PHI Join: " << *Join);
1140 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1141 VNInfo *VNI = DstInt.getValNumInfo(0);
1143 // Change the non-identity copies to directly target the phi destination.
1144 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1145 MachineInstr *PHICopy = OtherCopies[i];
1146 DEBUG(errs() << "Moving: " << *PHICopy);
1148 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1149 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1150 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1151 MachineInstrIndex StartIndex = SLR->start;
1152 MachineInstrIndex EndIndex = SLR->end;
1154 // Delete val# defined by the now identity copy and add the range from
1155 // beginning of the mbb to the end of the range.
1156 SrcInt.removeValNo(SLR->valno);
1157 DEBUG(errs() << " added range [" << StartIndex << ','
1158 << EndIndex << "] to reg" << DstInt.reg << '\n');
1159 if (DstInt.liveAt(StartIndex))
1160 DstInt.removeRange(StartIndex, EndIndex);
1161 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
// The redirected copy now feeds the phi, so its value is killed by a phi.
1163 NewVNI->setHasPHIKill(true);
1164 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's operands that referenced the phi source register.
1165 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1166 MachineOperand &MO = PHICopy->getOperand(j);
1167 if (!MO.isReg() || MO.getReg() != PHISrc)
1173 // Now let's eliminate all the would-be identity copies.
1174 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1175 MachineInstr *PHICopy = IdentCopies[i];
1176 DEBUG(errs() << "Coalescing: " << *PHICopy);
1178 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1179 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1180 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1181 MachineInstrIndex StartIndex = SLR->start;
1182 MachineInstrIndex EndIndex = SLR->end;
1184 // Delete val# defined by the now identity copy and add the range from
1185 // beginning of the mbb to the end of the range.
1186 SrcInt.removeValNo(SLR->valno);
1187 RemoveMachineInstrFromMaps(PHICopy);
1188 PHICopy->eraseFromParent();
1189 DEBUG(errs() << " added range [" << StartIndex << ','
1190 << EndIndex << "] to reg" << DstInt.reg << '\n');
1191 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1194 // Remove the phi join and update the phi block liveness.
1195 MachineInstrIndex MIIndex = getInstructionIndex(Join);
1196 MachineInstrIndex UseIndex = getUseIndex(MIIndex);
1197 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1198 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1199 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The value is no longer defined by a copy; its def is no longer accurate.
1200 DLR->valno->setCopy(0);
1201 DLR->valno->setIsDefAccurate(false);
1202 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1203 SrcInt.removeRange(SLR->start, SLR->end);
1204 assert(SrcInt.empty());
1205 removeInterval(PHISrc);
1206 RemoveMachineInstrFromMaps(Join);
1207 Join->eraseFromParent();
1213 /// computeIntervals - computes the live intervals for virtual
1214 /// registers. for some ordering of the machine instructions [1,N] a
1215 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1216 /// which a variable is live
// Walks every basic block: first handles live-in registers, then visits
// each instruction's register operands (defs build intervals, undef uses
// are collected and given empty intervals afterwards).
// NOTE(review): interior lines are missing from this excerpt — confirm
// against the full file.
1217 void LiveIntervals::computeIntervals() {
1218 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1219 << "********** Function: "
1220 << ((Value*)mf_->getFunction())->getName() << '\n');
1222 SmallVector<unsigned, 8> UndefUses;
1223 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1224 MBBI != E; ++MBBI) {
1225 MachineBasicBlock *MBB = MBBI;
1226 // Track the index of the current machine instr.
1227 MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
1228 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1230 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1232 // Create intervals for live-ins to this BB first.
1233 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1234 LE = MBB->livein_end(); LI != LE; ++LI) {
1235 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1236 // Multiple live-ins can alias the same register.
1237 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1238 if (!hasInterval(*AS))
1239 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1243 // Skip over empty initial indices.
1244 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1245 getInstructionFromIndex(MIIndex) == 0)
1246 MIIndex = getNextIndex(MIIndex);
1248 for (; MI != miEnd; ++MI) {
1249 DEBUG(errs() << MIIndex << "\t" << *MI);
// Iterate operands in reverse so defs processed here don't disturb the
// indices of operands not yet visited.
1252 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1253 MachineOperand &MO = MI->getOperand(i);
1254 if (!MO.isReg() || !MO.getReg())
1257 // handle register defs - build intervals
1259 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1260 else if (MO.isUndef())
1261 UndefUses.push_back(MO.getReg());
1264 // Skip over the empty slots after each instruction.
1265 unsigned Slots = MI->getDesc().getNumDefs();
1270 MIIndex = getNextIndex(MIIndex);
1272 // Skip over empty indices.
1273 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1274 getInstructionFromIndex(MIIndex) == 0)
1275 MIIndex = getNextIndex(MIIndex);
1279 // Create empty intervals for registers defined by implicit_def's (except
1280 // for those implicit_def that define values which are liveout of their
1282 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1283 unsigned UndefReg = UndefUses[i];
1284 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Collect the basic blocks whose start index falls in
// [Start, End), using binary search over the sorted Idx2MBBMap.
// NOTE(review): the update of ResVal and the final return are not visible
// in this excerpt — confirm against the full file.
1288 bool LiveIntervals::findLiveInMBBs(
1289 MachineInstrIndex Start, MachineInstrIndex End,
1290 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1291 std::vector<IdxMBBPair>::const_iterator I =
1292 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1294 bool ResVal = false;
1295 while (I != Idx2MBBMap.end()) {
// Stop once a block starts at or beyond End.
1296 if (I->first >= End)
1298 MBBs.push_back(I->second);
// findReachableMBBs - Collect the successors of the basic blocks that lie
// within [Start, End), i.e. the blocks reachable from that index range.
// NOTE(review): the ResVal update and the final return are not visible in
// this excerpt — confirm against the full file.
1305 bool LiveIntervals::findReachableMBBs(
1306 MachineInstrIndex Start, MachineInstrIndex End,
1307 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1308 std::vector<IdxMBBPair>::const_iterator I =
1309 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1311 bool ResVal = false;
1312 while (I != Idx2MBBMap.end()) {
1315 MachineBasicBlock *MBB = I->second;
// Stop once the current block extends past End.
1316 if (getMBBEndIdx(MBB) > End)
1318 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1319 SE = MBB->succ_end(); SI != SE; ++SI)
1320 MBBs.push_back(*SI);
// createInterval - Allocate a fresh LiveInterval for reg. Physical
// registers get an effectively infinite spill weight (HUGE_VALF) so the
// allocator never chooses to spill them; virtual registers start at 0.
1327 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1328 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1329 return new LiveInterval(reg, Weight);
1332 /// dupInterval - Duplicate a live interval. The caller is responsible for
1333 /// managing the allocated memory.
// NOTE(review): the `return NewLI;` statement is not visible in this
// excerpt — confirm against the full file.
1334 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1335 LiveInterval *NewLI = createInterval(li->reg);
1336 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1340 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1341 /// copy field and returns the source register that defines it.
// Handles the three subreg pseudo-instructions specially, otherwise asks
// the target whether the copy is a plain move.
1342 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1343 if (!VNI->getCopy())
1346 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1347 // If it's extracting out of a physical register, return the sub-register.
1348 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1349 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1350 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1352 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1353 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
// For insert/subreg-to-reg, the inserted value is operand 2.
1354 return VNI->getCopy()->getOperand(2).getReg();
1356 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1357 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1359 llvm_unreachable("Unrecognized copy instruction!");
1363 //===----------------------------------------------------------------------===//
1364 // Register allocator hooks.
1367 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1368 /// allow one) virtual register operand, then its uses are implicitly using
1369 /// the register. Returns the virtual register.
// NOTE(review): the declaration of RegOp and the final return are not
// visible in this excerpt — confirm against the full file.
1370 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1371 MachineInstr *MI) const {
1373 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1374 MachineOperand &MO = MI->getOperand(i);
1375 if (!MO.isReg() || !MO.isUse())
1377 unsigned Reg = MO.getReg();
// Skip the interval's own register and null registers.
1378 if (Reg == 0 || Reg == li.reg)
1381 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1382 !allocatableRegs_[Reg])
1384 // FIXME: For now, only remat MI with at most one register operand.
1386 "Can't rematerialize instruction with multiple register operand!");
1387 RegOp = MO.getReg();
1395 /// isValNoAvailableAt - Return true if the val# of the specified interval
1396 /// which reaches the given instruction also reaches the specified use index.
1397 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1398 MachineInstrIndex UseIdx) const {
1399 MachineInstrIndex Index = getInstructionIndex(MI);
// Value number live at MI's index...
1400 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...must also be the value number live at UseIdx.
1401 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1402 return UI != li.end() && UI->valno == ValNo;
1405 /// isReMaterializable - Returns true if the definition MI of the specified
1406 /// val# of the specified interval is re-materializable.
// Checks, in order: trivially-safe cases (implicit_def, load from an
// immutable stack slot), target-declared triviality, and — under the
// -aggressive-remat flag — a conservative analysis of memory operands and
// register operands for constancy.
// NOTE(review): many interior lines are missing from this excerpt
// (continue/return statements, some closing braces) — confirm control flow
// against the full file before editing.
1407 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1408 const VNInfo *ValNo, MachineInstr *MI,
1409 SmallVectorImpl<LiveInterval*> &SpillIs,
1414 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1418 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1419 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1420 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
1421 // this but remember this is not safe to fold into a two-address
1423 // This is a load from fixed stack slot. It can be rematerialized.
1426 // If the target-specific rules don't identify an instruction as
1427 // being trivially rematerializable, use some target-independent
1429 if (!MI->getDesc().isRematerializable() ||
1430 !tii_->isTriviallyReMaterializable(MI)) {
1431 if (!EnableAggressiveRemat)
1434 // If the instruction accesses memory but the memoperands have been lost,
1435 // we can't analyze it.
1436 const TargetInstrDesc &TID = MI->getDesc();
1437 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1440 // Avoid instructions obviously unsafe for remat.
1441 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1444 // If the instruction accesses memory and the memory could be non-constant,
1445 // assume the instruction is not rematerializable.
1446 for (std::list<MachineMemOperand>::const_iterator
1447 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1448 const MachineMemOperand &MMO = *I;
1449 if (MMO.isVolatile() || MMO.isStore())
1451 const Value *V = MMO.getValue();
// Pseudo source values (constant pool, fixed stack, etc.) can declare
// themselves constant; otherwise ask alias analysis.
1454 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1455 if (!PSV->isConstant(mf_->getFrameInfo()))
1457 } else if (!aa_->pointsToConstantMemory(V))
1461 // If any of the registers accessed are non-constant, conservatively assume
1462 // the instruction is not rematerializable.
1463 unsigned ImpUse = 0;
1464 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1465 const MachineOperand &MO = MI->getOperand(i);
1467 unsigned Reg = MO.getReg();
1470 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1473 // Only allow one def, and that in the first operand.
1474 if (MO.isDef() != (i == 0))
1477 // Only allow constant-valued registers.
1478 bool IsLiveIn = mri_->isLiveIn(Reg);
1479 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1480 E = mri_->def_end();
1482 // For the def, it should be the only def of that register.
1483 if (MO.isDef() && (next(I) != E || IsLiveIn))
1487 // Only allow one use other register use, as that's all the
1488 // remat mechanisms support currently.
1489 if (Reg != li.reg) {
1492 else if (Reg != ImpUse)
1495 // For the use, there should be only one associated def.
1496 if (I != E && (next(I) != E || IsLiveIn))
// If the remat def implicitly uses another virtual register, that value
// must be available at every use point of li.reg.
1503 unsigned ImpUse = getReMatImplicitUse(li, MI);
1505 const LiveInterval &ImpLi = getInterval(ImpUse);
1506 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1507 re = mri_->use_end(); ri != re; ++ri) {
1508 MachineInstr *UseMI = &*ri;
1509 MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
1510 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1512 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1516 // If a register operand of the re-materialized instruction is going to
1517 // be spilled next, then it's not legal to re-materialize this instruction.
1518 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1519 if (ImpUse == SpillIs[i]->reg)
1525 /// isReMaterializable - Returns true if the definition MI of the specified
1526 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with empty/dummy
// SpillIs and isLoad out-arguments.
1527 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1528 const VNInfo *ValNo, MachineInstr *MI) {
1529 SmallVector<LiveInterval*, 4> Dummy1;
1531 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1534 /// isReMaterializable - Returns true if every definition of MI of every
1535 /// val# of the specified interval is re-materializable.
// Walks every value number: unused val#s are skipped; each remaining def
// must have an accurate defining instruction that is itself
// rematerializable. isLoad accumulates whether any def is a load.
// NOTE(review): some interior lines (e.g. the final return) are not
// visible in this excerpt — confirm against the full file.
1536 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1537 SmallVectorImpl<LiveInterval*> &SpillIs,
1540 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1542 const VNInfo *VNI = *i;
1543 if (VNI->isUnused())
1544 continue; // Dead val#.
1545 // Is the def for the val# rematerializable?
1546 if (!VNI->isDefAccurate())
1548 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1549 bool DefIsLoad = false;
1551 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1553 isLoad |= DefIsLoad;
1558 /// FilterFoldedOps - Filter out two-address use operands. Return
1559 /// true if it finds any issue with the operands that ought to prevent
// Accumulates mod/ref information for the operands in MRInfo and collects
// the foldable operand indices into FoldOps.
1561 static bool FilterFoldedOps(MachineInstr *MI,
1562 SmallVector<unsigned, 2> &Ops,
1564 SmallVector<unsigned, 2> &FoldOps) {
1566 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1567 unsigned OpIdx = Ops[i];
1568 MachineOperand &MO = MI->getOperand(OpIdx);
1569 // FIXME: fold subreg use.
1573 MRInfo |= (unsigned)VirtRegMap::isMod;
1575 // Filter out two-address use operand(s).
1576 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use is both modified and referenced; it is not added to FoldOps.
1577 MRInfo = VirtRegMap::isModRef;
1580 MRInfo |= (unsigned)VirtRegMap::isRef;
1582 FoldOps.push_back(OpIdx);
1588 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1589 /// slot / to reg or any rematerialized load into ith operand of specified
1590 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the folded instruction replaces MI in the block and in the
// index maps, and spill/restore bookkeeping is transferred to it.
// NOTE(review): interior lines are missing from this excerpt (e.g. the
// fmi null-check around internal line 1620) — confirm against the full
// file.
1592 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1593 VirtRegMap &vrm, MachineInstr *DefMI,
1594 MachineInstrIndex InstrIdx,
1595 SmallVector<unsigned, 2> &Ops,
1596 bool isSS, int Slot, unsigned Reg) {
1597 // If it is an implicit def instruction, just delete it.
1598 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1599 RemoveMachineInstrFromMaps(MI);
1600 vrm.RemoveMachineInstrFromMaps(MI);
1601 MI->eraseFromParent();
1606 // Filter the list of operand indexes that are to be folded. Abort if
1607 // any operand will prevent folding.
1608 unsigned MRInfo = 0;
1609 SmallVector<unsigned, 2> FoldOps;
1610 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1613 // The only time it's safe to fold into a two address instruction is when
1614 // it's folding reload and spill from / into a spill stack slot.
1615 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either against the stack slot (spill/restore) or against the
// rematerialized defining instruction.
1618 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1619 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1621 // Remember this instruction uses the spill slot.
1622 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1624 // Attempt to fold the memory reference into the instruction. If
1625 // we can do this, we don't need to insert spill code.
1626 MachineBasicBlock &MBB = *MI->getParent();
1627 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1628 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1629 vrm.transferSpillPts(MI, fmi);
1630 vrm.transferRestorePts(MI, fmi);
1631 vrm.transferEmergencySpills(MI, fmi);
// Replace MI with the folded instruction in both index maps and the block.
1633 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1634 mi2iMap_[fmi] = InstrIdx;
1635 MI = MBB.insert(MBB.erase(MI), fmi);
1642 /// canFoldMemoryOperand - Returns true if the specified load / store
1643 /// folding is possible.
// Mirrors the checks of tryFoldMemoryOperand without mutating anything:
// filter the operands, reject remat-into-def, then ask the target.
1644 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1645 SmallVector<unsigned, 2> &Ops,
1647 // Filter the list of operand indexes that are to be folded. Abort if
1648 // any operand will prevent folding.
1649 unsigned MRInfo = 0;
1650 SmallVector<unsigned, 2> FoldOps;
1651 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1654 // It's only legal to remat for a use, not a def.
1655 if (ReMat && (MRInfo & VirtRegMap::isMod))
1658 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if the interval's live ranges are all
// contained in a single basic block (none crosses a block boundary).
// NOTE(review): the returns for the crossing/multi-block cases and the
// final return are not visible in this excerpt — confirm against the full
// file.
1661 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1662 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1663 for (LiveInterval::Ranges::const_iterator
1664 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the block containing the range start via the sorted index map.
1665 std::vector<IdxMBBPair>::const_iterator II =
1666 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1667 if (II == Idx2MBBMap.end())
1669 if (I->end > II->first) // crossing a MBB.
1671 MBBs.insert(II->second);
1672 if (MBBs.size() > 1)
1678 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1679 /// interval on to-be re-materialized operands of MI) with new register.
1680 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1681 MachineInstr *MI, unsigned NewVReg,
1683 // There is an implicit use. That means one of the other operand is
1684 // being remat'ed and the remat'ed instruction has li.reg as an
1685 // use operand. Make sure we rewrite that as well.
1686 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1687 MachineOperand &MO = MI->getOperand(i);
1690 unsigned Reg = MO.getReg();
1691 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1693 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
1695 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1696 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1698 UseMO->setReg(NewVReg);
1702 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1703 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's references to li.reg onto a (possibly new)
// vreg NewVReg, attempting load/store folding and rematerialization along
// the way, and extends/creates the spill interval nI for the new vreg.
// Returns whether all rewritten operands could be folded.
// NOTE(review): a large number of interior lines are missing from this
// excerpt (continue statements, closing braces, the RestartInstruction
// label, several conditionals) — treat the visible control flow as a
// sketch and confirm against the full file before editing.
1704 bool LiveIntervals::
1705 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1706 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1708 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1709 unsigned Slot, int LdSlot,
1710 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1712 const TargetRegisterClass* rc,
1713 SmallVector<int, 4> &ReMatIds,
1714 const MachineLoopInfo *loopInfo,
1715 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1716 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1717 std::vector<LiveInterval*> &NewLIs) {
1718 bool CanFold = false;
1720 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1721 MachineOperand& mop = MI->getOperand(i);
1724 unsigned Reg = mop.getReg();
1725 unsigned RegI = Reg;
1726 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1731 bool TryFold = !DefIsReMat;
1732 bool FoldSS = true; // Default behavior unless it's a remat.
1733 int FoldSlot = Slot;
1735 // If this is the rematerializable definition MI itself and
1736 // all of its uses are rematerialized, simply delete it.
1737 if (MI == ReMatOrigDefMI && CanDelete) {
1738 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1740 RemoveMachineInstrFromMaps(MI);
1741 vrm.RemoveMachineInstrFromMaps(MI);
1742 MI->eraseFromParent();
1746 // If def for this use can't be rematerialized, then try folding.
1747 // If def is rematerializable and it's a load, also try folding.
1748 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1750 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1756 // Scan all of the operands of this instruction rewriting operands
1757 // to use NewVReg instead of li.reg as appropriate. We do this for
1760 // 1. If the instr reads the same spilled vreg multiple times, we
1761 // want to reuse the NewVReg.
1762 // 2. If the instr is a two-addr instruction, we are required to
1763 // keep the src/dst regs pinned.
1765 // Keep track of whether we replace a use and/or def so that we can
1766 // create the spill interval with the appropriate range.
1768 HasUse = mop.isUse();
1769 HasDef = mop.isDef();
1770 SmallVector<unsigned, 2> Ops;
// Collect the remaining operands that reference the same register so
// they can all be rewritten (and folded) together.
1772 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1773 const MachineOperand &MOj = MI->getOperand(j);
1776 unsigned RegJ = MOj.getReg();
1777 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1781 if (!MOj.isUndef()) {
1782 HasUse |= MOj.isUse();
1783 HasDef |= MOj.isDef();
1788 // Create a new virtual register for the spill interval.
1789 // Create the new register now so we can map the fold instruction
1790 // to the new register so when it is unfolded we get the correct
1792 bool CreatedNewVReg = false;
1794 NewVReg = mri_->createVirtualRegister(rc);
1796 CreatedNewVReg = true;
1802 // Do not fold load / store here if we are splitting. We'll find an
1803 // optimal point to insert a load / store later.
1805 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1806 Ops, FoldSS, FoldSlot, NewVReg)) {
1807 // Folding the load/store can completely change the instruction in
1808 // unpredictable ways, rescan it from the beginning.
1811 // We need to give the new vreg the same stack slot as the
1812 // spilled interval.
1813 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1819 if (isNotInMIMap(MI))
1821 goto RestartInstruction;
1824 // We'll try to fold it later if it's profitable.
1825 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any implicit remat uses) to the new vreg.
1829 mop.setReg(NewVReg);
1830 if (mop.isImplicit())
1831 rewriteImplicitOps(li, MI, NewVReg, vrm);
1833 // Reuse NewVReg for other reads.
1834 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1835 MachineOperand &mopj = MI->getOperand(Ops[j]);
1836 mopj.setReg(NewVReg);
1837 if (mopj.isImplicit())
1838 rewriteImplicitOps(li, MI, NewVReg, vrm);
1841 if (CreatedNewVReg) {
1843 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1844 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1845 // Each valnum may have its own remat id.
1846 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1848 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1850 if (!CanDelete || (HasUse && HasDef)) {
1851 // If this is a two-addr instruction then its use operands are
1852 // rematerializable but its def is not. It should be assigned a
1854 vrm.assignVirt2StackSlot(NewVReg, Slot);
1857 vrm.assignVirt2StackSlot(NewVReg, Slot);
1859 } else if (HasUse && HasDef &&
1860 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1861 // If this interval hasn't been assigned a stack slot (because earlier
1862 // def is a deleted remat def), do it now.
1863 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1864 vrm.assignVirt2StackSlot(NewVReg, Slot);
1867 // Re-matting an instruction with virtual register use. Add the
1868 // register as an implicit use on the use MI.
1869 if (DefIsReMat && ImpUse)
1870 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1872 // Create a new register interval for this spill / remat.
1873 LiveInterval &nI = getOrCreateInterval(NewVReg);
1874 if (CreatedNewVReg) {
1875 NewLIs.push_back(&nI);
1876 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1878 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend or create the live range on the new interval to cover this
// instruction's use and/or def slots.
1882 if (CreatedNewVReg) {
1883 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1884 nI.getNextValue(MachineInstrIndex(), 0, false,
1886 DEBUG(errs() << " +" << LR);
1889 // Extend the split live interval to this def / use.
1890 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1891 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1892 nI.getValNumInfo(nI.getNumValNums()-1));
1893 DEBUG(errs() << " +" << LR);
1898 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1899 nI.getNextValue(MachineInstrIndex(), 0, false,
1901 DEBUG(errs() << " +" << LR);
1906 errs() << "\t\t\t\tAdded new interval: ";
1907 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if VNI has a (non-PHI) kill inside
// MBB at an index strictly after Idx and before the block's end.
// NOTE(review): the final `return false;` is not visible in this excerpt.
1913 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1915 MachineBasicBlock *MBB,
1916 MachineInstrIndex Idx) const {
1917 MachineInstrIndex End = getMBBEndIdx(MBB);
1918 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are block-boundary artifacts, not real in-block kills.
1919 if (VNI->kills[j].isPHIIndex())
1922 MachineInstrIndex KillIdx = VNI->kills[j];
1923 if (KillIdx > Idx && KillIdx < End)
1929 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1930 /// during spilling.
// NOTE(review): the MI/HasUse/HasDef member declarations (internal lines
// 1934-1936) are not visible in this excerpt.
1932 struct RewriteInfo {
1933 MachineInstrIndex Index;
1937 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1938 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict weak ordering by instruction index, used to sort RewriteInfo
// lists before rewriting defs/uses in program order.
1941 struct RewriteInfoCompare {
1942 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1943 return LHS.Index < RHS.Index;
1948 void LiveIntervals::
1949 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1950 LiveInterval::Ranges::const_iterator &I,
1951 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1952 unsigned Slot, int LdSlot,
1953 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1955 const TargetRegisterClass* rc,
1956 SmallVector<int, 4> &ReMatIds,
1957 const MachineLoopInfo *loopInfo,
1958 BitVector &SpillMBBs,
1959 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1960 BitVector &RestoreMBBs,
1961 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1962 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1963 std::vector<LiveInterval*> &NewLIs) {
1964 bool AllCanFold = true;
1965 unsigned NewVReg = 0;
1966 MachineInstrIndex start = getBaseIndex(I->start);
1967 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1969 // First collect all the def / use in this live range that will be rewritten.
1970 // Make sure they are sorted according to instruction index.
1971 std::vector<RewriteInfo> RewriteMIs;
1972 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1973 re = mri_->reg_end(); ri != re; ) {
1974 MachineInstr *MI = &*ri;
1975 MachineOperand &O = ri.getOperand();
1977 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1978 MachineInstrIndex index = getInstructionIndex(MI);
1979 if (index < start || index >= end)
1983 // Must be defined by an implicit def. It should not be spilled. Note,
1984 // this is for correctness reason. e.g.
1985 // 8 %reg1024<def> = IMPLICIT_DEF
1986 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1987 // The live range [12, 14) are not part of the r1024 live interval since
1988 // it's defined by an implicit def. It will not conflicts with live
1989 // interval of r1025. Now suppose both registers are spilled, you can
1990 // easily see a situation where both registers are reloaded before
1991 // the INSERT_SUBREG and both target registers that would overlap.
1993 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1995 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1997 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1998 // Now rewrite the defs and uses.
1999 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
2000 RewriteInfo &rwi = RewriteMIs[i];
2002 MachineInstrIndex index = rwi.Index;
2003 bool MIHasUse = rwi.HasUse;
2004 bool MIHasDef = rwi.HasDef;
2005 MachineInstr *MI = rwi.MI;
2006 // If MI def and/or use the same register multiple times, then there
2007 // are multiple entries.
2008 unsigned NumUses = MIHasUse;
2009 while (i != e && RewriteMIs[i].MI == MI) {
2010 assert(RewriteMIs[i].Index == index);
2011 bool isUse = RewriteMIs[i].HasUse;
2012 if (isUse) ++NumUses;
2014 MIHasDef |= RewriteMIs[i].HasDef;
2017 MachineBasicBlock *MBB = MI->getParent();
2019 if (ImpUse && MI != ReMatDefMI) {
2020 // Re-matting an instruction with virtual register use. Update the
2021 // register interval's spill weight to HUGE_VALF to prevent it from
2023 LiveInterval &ImpLi = getInterval(ImpUse);
2024 ImpLi.weight = HUGE_VALF;
2027 unsigned MBBId = MBB->getNumber();
2028 unsigned ThisVReg = 0;
2030 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2031 if (NVI != MBBVRegsMap.end()) {
2032 ThisVReg = NVI->second;
2039 // It's better to start a new interval to avoid artifically
2040 // extend the new interval.
2041 if (MIHasDef && !MIHasUse) {
2042 MBBVRegsMap.erase(MBB->getNumber());
2048 bool IsNew = ThisVReg == 0;
2050 // This ends the previous live interval. If all of its def / use
2051 // can be folded, give it a low spill weight.
2052 if (NewVReg && TrySplit && AllCanFold) {
2053 LiveInterval &nI = getOrCreateInterval(NewVReg);
2060 bool HasDef = false;
2061 bool HasUse = false;
2062 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2063 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2064 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2065 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2066 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2067 if (!HasDef && !HasUse)
2070 AllCanFold &= CanFold;
2072 // Update weight of spill interval.
2073 LiveInterval &nI = getOrCreateInterval(NewVReg);
2075 // The spill weight is now infinity as it cannot be spilled again.
2076 nI.weight = HUGE_VALF;
2080 // Keep track of the last def and first use in each MBB.
2082 if (MI != ReMatOrigDefMI || !CanDelete) {
2083 bool HasKill = false;
2085 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2087 // If this is a two-address code, then this index starts a new VNInfo.
2088 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2090 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2092 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2093 SpillIdxes.find(MBBId);
2095 if (SII == SpillIdxes.end()) {
2096 std::vector<SRInfo> S;
2097 S.push_back(SRInfo(index, NewVReg, true));
2098 SpillIdxes.insert(std::make_pair(MBBId, S));
2099 } else if (SII->second.back().vreg != NewVReg) {
2100 SII->second.push_back(SRInfo(index, NewVReg, true));
2101 } else if (index > SII->second.back().index) {
2102 // If there is an earlier def and this is a two-address
2103 // instruction, then it's not possible to fold the store (which
2104 // would also fold the load).
2105 SRInfo &Info = SII->second.back();
2107 Info.canFold = !HasUse;
2109 SpillMBBs.set(MBBId);
2110 } else if (SII != SpillIdxes.end() &&
2111 SII->second.back().vreg == NewVReg &&
2112 index > SII->second.back().index) {
2113 // There is an earlier def that's not killed (must be two-address).
2114 // The spill is no longer needed.
2115 SII->second.pop_back();
2116 if (SII->second.empty()) {
2117 SpillIdxes.erase(MBBId);
2118 SpillMBBs.reset(MBBId);
2125 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2126 SpillIdxes.find(MBBId);
2127 if (SII != SpillIdxes.end() &&
2128 SII->second.back().vreg == NewVReg &&
2129 index > SII->second.back().index)
2130 // Use(s) following the last def, it's not safe to fold the spill.
2131 SII->second.back().canFold = false;
2132 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2133 RestoreIdxes.find(MBBId);
2134 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2135 // If we are splitting live intervals, only fold if it's the first
2136 // use and there isn't another use later in the MBB.
2137 RII->second.back().canFold = false;
2139 // Only need a reload if there isn't an earlier def / use.
2140 if (RII == RestoreIdxes.end()) {
2141 std::vector<SRInfo> Infos;
2142 Infos.push_back(SRInfo(index, NewVReg, true));
2143 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2145 RII->second.push_back(SRInfo(index, NewVReg, true));
2147 RestoreMBBs.set(MBBId);
2151 // Update spill weight.
2152 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2153 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2156 if (NewVReg && TrySplit && AllCanFold) {
2157 // If all of its def / use can be folded, give it a low spill weight.
2158 LiveInterval &nI = getOrCreateInterval(NewVReg);
2163 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2164 unsigned vr, BitVector &RestoreMBBs,
2165 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2166 if (!RestoreMBBs[Id])
2168 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2169 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2170 if (Restores[i].index == index &&
2171 Restores[i].vreg == vr &&
2172 Restores[i].canFold)
2177 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2178 unsigned vr, BitVector &RestoreMBBs,
2179 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2180 if (!RestoreMBBs[Id])
2182 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2183 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2184 if (Restores[i].index == index && Restores[i].vreg)
2185 Restores[i].index = MachineInstrIndex();
2188 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2189 /// spilled and create empty intervals for their uses.
2191 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2192 const TargetRegisterClass* rc,
2193 std::vector<LiveInterval*> &NewLIs) {
2194 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2195 re = mri_->reg_end(); ri != re; ) {
2196 MachineOperand &O = ri.getOperand();
2197 MachineInstr *MI = &*ri;
2200 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2201 "Register def was not rewritten?");
2202 RemoveMachineInstrFromMaps(MI);
2203 vrm.RemoveMachineInstrFromMaps(MI);
2204 MI->eraseFromParent();
2206 // This must be an use of an implicit_def so it's not part of the live
2207 // interval. Create a new empty live interval for it.
2208 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2209 unsigned NewVReg = mri_->createVirtualRegister(rc);
2211 vrm.setIsImplicitlyDefined(NewVReg);
2212 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2213 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2214 MachineOperand &MO = MI->getOperand(i);
2215 if (MO.isReg() && MO.getReg() == li.reg) {
2224 std::vector<LiveInterval*> LiveIntervals::
2225 addIntervalsForSpillsFast(const LiveInterval &li,
2226 const MachineLoopInfo *loopInfo,
2228 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2230 std::vector<LiveInterval*> added;
2232 assert(li.weight != HUGE_VALF &&
2233 "attempt to spill already spilled interval!");
2236 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2241 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2243 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2244 while (RI != mri_->reg_end()) {
2245 MachineInstr* MI = &*RI;
2247 SmallVector<unsigned, 2> Indices;
2248 bool HasUse = false;
2249 bool HasDef = false;
2251 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2252 MachineOperand& mop = MI->getOperand(i);
2253 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2255 HasUse |= MI->getOperand(i).isUse();
2256 HasDef |= MI->getOperand(i).isDef();
2258 Indices.push_back(i);
2261 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2262 Indices, true, slot, li.reg)) {
2263 unsigned NewVReg = mri_->createVirtualRegister(rc);
2265 vrm.assignVirt2StackSlot(NewVReg, slot);
2267 // create a new register for this spill
2268 LiveInterval &nI = getOrCreateInterval(NewVReg);
2270 // the spill weight is now infinity as it
2271 // cannot be spilled again
2272 nI.weight = HUGE_VALF;
2274 // Rewrite register operands to use the new vreg.
2275 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2276 E = Indices.end(); I != E; ++I) {
2277 MI->getOperand(*I).setReg(NewVReg);
2279 if (MI->getOperand(*I).isUse())
2280 MI->getOperand(*I).setIsKill(true);
2283 // Fill in the new live interval.
2284 MachineInstrIndex index = getInstructionIndex(MI);
2286 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2287 nI.getNextValue(MachineInstrIndex(), 0, false,
2288 getVNInfoAllocator()));
2289 DEBUG(errs() << " +" << LR);
2291 vrm.addRestorePoint(NewVReg, MI);
2294 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2295 nI.getNextValue(MachineInstrIndex(), 0, false,
2296 getVNInfoAllocator()));
2297 DEBUG(errs() << " +" << LR);
2299 vrm.addSpillPoint(NewVReg, true, MI);
2302 added.push_back(&nI);
2305 errs() << "\t\t\t\tadded new interval: ";
2312 RI = mri_->reg_begin(li.reg);
2318 std::vector<LiveInterval*> LiveIntervals::
2319 addIntervalsForSpills(const LiveInterval &li,
2320 SmallVectorImpl<LiveInterval*> &SpillIs,
2321 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2323 if (EnableFastSpilling)
2324 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2326 assert(li.weight != HUGE_VALF &&
2327 "attempt to spill already spilled interval!");
2330 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2331 li.print(errs(), tri_);
2335 // Each bit specify whether a spill is required in the MBB.
2336 BitVector SpillMBBs(mf_->getNumBlockIDs());
2337 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2338 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2339 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2340 DenseMap<unsigned,unsigned> MBBVRegsMap;
2341 std::vector<LiveInterval*> NewLIs;
2342 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2344 unsigned NumValNums = li.getNumValNums();
2345 SmallVector<MachineInstr*, 4> ReMatDefs;
2346 ReMatDefs.resize(NumValNums, NULL);
2347 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2348 ReMatOrigDefs.resize(NumValNums, NULL);
2349 SmallVector<int, 4> ReMatIds;
2350 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2351 BitVector ReMatDelete(NumValNums);
2352 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2354 // Spilling a split live interval. It cannot be split any further. Also,
2355 // it's also guaranteed to be a single val# / range interval.
2356 if (vrm.getPreSplitReg(li.reg)) {
2357 vrm.setIsSplitFromReg(li.reg, 0);
2358 // Unset the split kill marker on the last use.
2359 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2360 if (KillIdx != MachineInstrIndex()) {
2361 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2362 assert(KillMI && "Last use disappeared?");
2363 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2364 assert(KillOp != -1 && "Last use disappeared?");
2365 KillMI->getOperand(KillOp).setIsKill(false);
2367 vrm.removeKillPoint(li.reg);
2368 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2369 Slot = vrm.getStackSlot(li.reg);
2370 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2371 MachineInstr *ReMatDefMI = DefIsReMat ?
2372 vrm.getReMaterializedMI(li.reg) : NULL;
2374 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2375 bool isLoad = isLoadSS ||
2376 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2377 bool IsFirstRange = true;
2378 for (LiveInterval::Ranges::const_iterator
2379 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2380 // If this is a split live interval with multiple ranges, it means there
2381 // are two-address instructions that re-defined the value. Only the
2382 // first def can be rematerialized!
2384 // Note ReMatOrigDefMI has already been deleted.
2385 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2386 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2387 false, vrm, rc, ReMatIds, loopInfo,
2388 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2389 MBBVRegsMap, NewLIs);
2391 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2392 Slot, 0, false, false, false,
2393 false, vrm, rc, ReMatIds, loopInfo,
2394 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2395 MBBVRegsMap, NewLIs);
2397 IsFirstRange = false;
2400 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2404 bool TrySplit = !intervalIsInOneMBB(li);
2407 bool NeedStackSlot = false;
2408 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2410 const VNInfo *VNI = *i;
2411 unsigned VN = VNI->id;
2412 if (VNI->isUnused())
2413 continue; // Dead val#.
2414 // Is the def for the val# rematerializable?
2415 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2416 ? getInstructionFromIndex(VNI->def) : 0;
2418 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2419 // Remember how to remat the def of this val#.
2420 ReMatOrigDefs[VN] = ReMatDefMI;
2421 // Original def may be modified so we have to make a copy here.
2422 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2423 CloneMIs.push_back(Clone);
2424 ReMatDefs[VN] = Clone;
2426 bool CanDelete = true;
2427 if (VNI->hasPHIKill()) {
2428 // A kill is a phi node, not all of its uses can be rematerialized.
2429 // It must not be deleted.
2431 // Need a stack slot if there is any live range where uses cannot be
2433 NeedStackSlot = true;
2436 ReMatDelete.set(VN);
2438 // Need a stack slot if there is any live range where uses cannot be
2440 NeedStackSlot = true;
2444 // One stack slot per live interval.
2445 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2446 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2447 Slot = vrm.assignVirt2StackSlot(li.reg);
2449 // This case only occurs when the prealloc splitter has already assigned
2450 // a stack slot to this vreg.
2452 Slot = vrm.getStackSlot(li.reg);
2455 // Create new intervals and rewrite defs and uses.
2456 for (LiveInterval::Ranges::const_iterator
2457 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2458 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2459 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2460 bool DefIsReMat = ReMatDefMI != NULL;
2461 bool CanDelete = ReMatDelete[I->valno->id];
2463 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2464 bool isLoad = isLoadSS ||
2465 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2466 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2467 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2468 CanDelete, vrm, rc, ReMatIds, loopInfo,
2469 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2470 MBBVRegsMap, NewLIs);
2473 // Insert spills / restores if we are splitting.
2475 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2479 SmallPtrSet<LiveInterval*, 4> AddedKill;
2480 SmallVector<unsigned, 2> Ops;
2481 if (NeedStackSlot) {
2482 int Id = SpillMBBs.find_first();
2484 std::vector<SRInfo> &spills = SpillIdxes[Id];
2485 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2486 MachineInstrIndex index = spills[i].index;
2487 unsigned VReg = spills[i].vreg;
2488 LiveInterval &nI = getOrCreateInterval(VReg);
2489 bool isReMat = vrm.isReMaterialized(VReg);
2490 MachineInstr *MI = getInstructionFromIndex(index);
2491 bool CanFold = false;
2492 bool FoundUse = false;
2494 if (spills[i].canFold) {
2496 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2497 MachineOperand &MO = MI->getOperand(j);
2498 if (!MO.isReg() || MO.getReg() != VReg)
2505 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2506 RestoreMBBs, RestoreIdxes))) {
2507 // MI has two-address uses of the same register. If the use
2508 // isn't the first and only use in the BB, then we can't fold
2509 // it. FIXME: Move this to rewriteInstructionsForSpills.
2516 // Fold the store into the def if possible.
2517 bool Folded = false;
2518 if (CanFold && !Ops.empty()) {
2519 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2522 // Also folded uses, do not issue a load.
2523 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2524 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2526 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2530 // Otherwise tell the spiller to issue a spill.
2532 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2533 bool isKill = LR->end == getStoreIndex(index);
2534 if (!MI->registerDefIsDead(nI.reg))
2535 // No need to spill a dead def.
2536 vrm.addSpillPoint(VReg, isKill, MI);
2538 AddedKill.insert(&nI);
2541 Id = SpillMBBs.find_next(Id);
2545 int Id = RestoreMBBs.find_first();
2547 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2548 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2549 MachineInstrIndex index = restores[i].index;
2550 if (index == MachineInstrIndex())
2552 unsigned VReg = restores[i].vreg;
2553 LiveInterval &nI = getOrCreateInterval(VReg);
2554 bool isReMat = vrm.isReMaterialized(VReg);
2555 MachineInstr *MI = getInstructionFromIndex(index);
2556 bool CanFold = false;
2558 if (restores[i].canFold) {
2560 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2561 MachineOperand &MO = MI->getOperand(j);
2562 if (!MO.isReg() || MO.getReg() != VReg)
2566 // If this restore were to be folded, it would have been folded
2575 // Fold the load into the use if possible.
2576 bool Folded = false;
2577 if (CanFold && !Ops.empty()) {
2579 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2581 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2583 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2584 // If the rematerializable def is a load, also try to fold it.
2585 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2586 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2587 Ops, isLoadSS, LdSlot, VReg);
2589 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2591 // Re-matting an instruction with virtual register use. Add the
2592 // register as an implicit use on the use MI and update the register
2593 // interval's spill weight to HUGE_VALF to prevent it from being
2595 LiveInterval &ImpLi = getInterval(ImpUse);
2596 ImpLi.weight = HUGE_VALF;
2597 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2602 // If folding is not possible / failed, then tell the spiller to issue a
2603 // load / rematerialization for us.
2605 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2607 vrm.addRestorePoint(VReg, MI);
2609 Id = RestoreMBBs.find_next(Id);
2612 // Finalize intervals: add kills, finalize spill weights, and filter out
2614 std::vector<LiveInterval*> RetNewLIs;
2615 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2616 LiveInterval *LI = NewLIs[i];
2618 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2619 if (!AddedKill.count(LI)) {
2620 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2621 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2622 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2623 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2624 assert(UseIdx != -1);
2625 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2626 LastUse->getOperand(UseIdx).setIsKill();
2627 vrm.addKillPoint(LI->reg, LastUseIdx);
2630 RetNewLIs.push_back(LI);
2634 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2638 /// hasAllocatableSuperReg - Return true if the specified physical register has
2639 /// any super register that's allocatable.
2640 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2641 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2642 if (allocatableRegs_[*AS] && hasInterval(*AS))
2647 /// getRepresentativeReg - Find the largest super register of the specified
2648 /// physical register.
2649 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2650 // Find the largest super-register that is allocatable.
2651 unsigned BestReg = Reg;
2652 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2653 unsigned SuperReg = *AS;
2654 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2662 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2663 /// specified interval that conflicts with the specified physical register.
2664 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2665 unsigned PhysReg) const {
2666 unsigned NumConflicts = 0;
2667 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2668 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2669 E = mri_->reg_end(); I != E; ++I) {
2670 MachineOperand &O = I.getOperand();
2671 MachineInstr *MI = O.getParent();
2672 MachineInstrIndex Index = getInstructionIndex(MI);
2673 if (pli.liveAt(Index))
2676 return NumConflicts;
2679 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2680 /// around all defs and uses of the specified interval. Return true if it
2681 /// was able to cut its interval.
2682 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2683 unsigned PhysReg, VirtRegMap &vrm) {
2684 unsigned SpillReg = getRepresentativeReg(PhysReg);
2686 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2687 // If there are registers which alias PhysReg, but which are not a
2688 // sub-register of the chosen representative super register. Assert
2689 // since we can't handle it yet.
2690 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2691 tri_->isSuperRegister(*AS, SpillReg));
2694 LiveInterval &pli = getInterval(SpillReg);
2695 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2696 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2697 E = mri_->reg_end(); I != E; ++I) {
2698 MachineOperand &O = I.getOperand();
2699 MachineInstr *MI = O.getParent();
2700 if (SeenMIs.count(MI))
2703 MachineInstrIndex Index = getInstructionIndex(MI);
2704 if (pli.liveAt(Index)) {
2705 vrm.addEmergencySpill(SpillReg, MI);
2706 MachineInstrIndex StartIdx = getLoadIndex(Index);
2707 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2708 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2709 pli.removeRange(StartIdx, EndIdx);
2713 raw_string_ostream Msg(msg);
2714 Msg << "Ran out of registers during register allocation!";
2715 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2716 Msg << "\nPlease check your inline asm statement for invalid "
2717 << "constraints:\n";
2718 MI->print(Msg, tm_);
2720 llvm_report_error(Msg.str());
2722 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2723 if (!hasInterval(*AS))
2725 LiveInterval &spli = getInterval(*AS);
2726 if (spli.liveAt(Index))
2727 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2734 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2735 MachineInstr* startInst) {
2736 LiveInterval& Interval = getOrCreateInterval(reg);
2737 VNInfo* VN = Interval.getNextValue(
2738 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2739 startInst, true, getVNInfoAllocator());
2740 VN->setHasPHIKill(true);
2741 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2743 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2744 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2745 Interval.addRange(LR);