1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/PseudoSourceValue.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Command-line knobs, -stats counters, and pass registration for the
// LiveIntervals analysis.
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Opt-in aggressive rematerialization (hidden debugging knob).
53 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
55 static cl::opt<bool> EnableFastSpilling("fast-spill",
56 cl::init(false), cl::Hidden);
// Early coalescing of PHI-join copies; the limit of -1 means "unlimited".
58 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
60 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
61 cl::init(-1), cl::Hidden);
// Counters reported with -stats.
63 STATISTIC(numIntervals , "Number of original intervals");
64 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
65 STATISTIC(numSplits , "Number of intervals split");
66 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Static pass registration under the name "liveintervals".
68 char LiveIntervals::ID = 0;
69 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis depends on and the
// analyses it preserves, then delegate to the base class.
// NOTE(review): the embedded original-line numbering jumps (78->81, 82->85,
// 86->89), so some lines of this function (likely conditional registrations
// and the closing brace) are elided in this view — confirm against upstream.
71 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
73 AU.addRequired<AliasAnalysis>();
74 AU.addPreserved<AliasAnalysis>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<LiveVariables>();
77 AU.addPreservedID(MachineLoopInfoID);
78 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run before this analysis.
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the LiveInterval objects,
// side tables, the VNInfo allocator, and any cloned instructions.
89 void LiveIntervals::releaseMemory() {
90 // Free the live intervals themselves.
91 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
92 E = r2iMap_.end(); I != E; ++I)
// NOTE(review): the loop body deleting each interval (and the clears of the
// maps) is elided here — the original numbering jumps 92->100.
100 terminatorGaps.clear();
101 phiJoinCopies.clear();
103 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
104 VNInfoAllocator.Reset();
// Delete cloned instructions queued up during earlier processing.
105 while (!CloneMIs.empty()) {
106 MachineInstr *MI = CloneMIs.back();
108 mf_->DeleteMachineInstr(MI);
// CanTurnIntoImplicitDef - Return true if the use of Reg at operand OpIdx of
// MI is a copy-like use — a target move, the source operand (2) of
// SUBREG_TO_REG, or the source operand (1) of EXTRACT_SUBREG — so that MI can
// be rewritten as an IMPLICIT_DEF when the source is itself undefined.
// NOTE(review): the return statements for each test are elided in this view
// (numbering jumps 115->119->121).
112 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
113 unsigned OpIdx, const TargetInstrInfo *tii_){
114 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
115 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
119 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
121 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
126 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
127 /// there is one implicit_def for each use. Add isUndef marker to
128 /// implicit_def defs and their uses.
// Walks blocks in depth-first order tracking which registers currently hold
// only an IMPLICIT_DEF value (ImpDefRegs), rewriting copy-like uses of those
// registers into IMPLICIT_DEFs, and finally cleaning up or materializing the
// remaining IMPLICIT_DEF instructions.
129 void LiveIntervals::processImplicitDefs() {
130 SmallSet<unsigned, 8> ImpDefRegs;
131 SmallVector<MachineInstr*, 8> ImpDefMIs;
132 MachineBasicBlock *Entry = mf_->begin();
133 SmallPtrSet<MachineBasicBlock*,16> Visited;
134 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
135 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
137 MachineBasicBlock *MBB = *DFI;
138 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
140 MachineInstr *MI = &*I;
// Record an IMPLICIT_DEF'd register; for a physreg, its sub-registers are
// implicitly defined as well.
142 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
143 unsigned Reg = MI->getOperand(0).getReg();
144 ImpDefRegs.insert(Reg);
145 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
146 for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
147 ImpDefRegs.insert(*SS);
149 ImpDefMIs.push_back(MI);
153 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
154 MachineOperand &MO = MI->getOperand(2);
155 if (ImpDefRegs.count(MO.getReg())) {
156 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
157 // This is an identity copy, eliminate it now.
159 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
162 MI->eraseFromParent();
// Scan uses of implicit-def'd registers in this instruction.
167 bool ChangedToImpDef = false;
168 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
169 MachineOperand& MO = MI->getOperand(i);
170 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
172 unsigned Reg = MO.getReg();
175 if (!ImpDefRegs.count(Reg))
177 // Use is a copy, just turn it into an implicit_def.
178 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
179 bool isKill = MO.isKill();
180 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
// Strip every operand but the def so MI becomes a bare IMPLICIT_DEF.
181 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
182 MI->RemoveOperand(j)
184 ImpDefRegs.erase(Reg);
185 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
188 ChangedToImpDef = true;
// Make sure other uses of Reg later in this instruction are considered
// before dropping Reg from the implicit-def set (the scan below looks for
// a following use operand of the same register — elided body, hedged).
193 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
195 for (unsigned j = i+1; j != e; ++j) {
196 MachineOperand &MOJ = MI->getOperand(j);
197 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
200 ImpDefRegs.erase(Reg);
204 if (ChangedToImpDef) {
205 // Backtrack to process this new implicit_def.
// A (non-implicit-def) def of a tracked register ends its "undefined" state.
208 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
209 MachineOperand& MO = MI->getOperand(i);
210 if (!MO.isReg() || !MO.isDef())
212 ImpDefRegs.erase(MO.getReg());
217 // Any outstanding liveout implicit_def's?
218 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
219 MachineInstr *MI = ImpDefMIs[i];
220 unsigned Reg = MI->getOperand(0).getReg();
221 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
222 !ImpDefRegs.count(Reg)) {
223 // Delete all "local" implicit_def's. That include those which define
224 // physical registers since they cannot be liveout.
225 MI->eraseFromParent();
229 // If there are multiple defs of the same register and at least one
230 // is not an implicit_def, do not insert implicit_def's before the
233 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
234 DE = mri_->def_end(); DI != DE; ++DI) {
235 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
243 // The only implicit_def which we want to keep are those that are live
245 MI->eraseFromParent();
// Rewrite remaining uses: copies become IMPLICIT_DEFs, other uses get a
// fresh vreg (construction elided below — numbering jumps 267 onward).
247 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
248 UE = mri_->use_end(); UI != UE; ) {
249 MachineOperand &RMO = UI.getOperand();
250 MachineInstr *RMI = &*UI;
252 MachineBasicBlock *RMBB = RMI->getParent();
256 // Turn a copy use into an implicit_def.
257 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
258 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
260 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
261 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
262 RMI->RemoveOperand(j);
266 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
267 unsigned NewVReg = mri_->createVirtualRegister(RC);
// computeNumbering - Assign a LiveIndex to every MachineInstr and basic
// block (leaving empty slots around instructions and a gap before each
// block's terminators), then remap every existing live range, VNInfo def,
// and kill index from the old numbering to the new one.
279 void LiveIntervals::computeNumbering() {
// Save the old index maps so existing intervals can be translated.
280 Index2MiMap OldI2MI = i2miMap_;
281 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
287 terminatorGaps.clear();
288 phiJoinCopies.clear();
292 // Number MachineInstrs and MachineBasicBlocks.
293 // Initialize MBB indexes to a sentinel.
294 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
295 std::make_pair(LiveIndex(),LiveIndex()));
298 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
300 LiveIndex StartIdx = MIIndex;
302 // Insert an empty slot at the beginning of each block.
303 MIIndex = getNextIndex(MIIndex);
304 i2miMap_.push_back(0);
306 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
309 if (I == MBB->getFirstTerminator()) {
310 // Leave a gap for before terminators, this is where we will point
// Reserve the terminator-gap index for this block (one per block).
312 LiveIndex tGap(true, MIIndex);
314 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
316 "Multiple 'first' terminators encountered during numbering.");
317 inserted = inserted; // Avoid compiler warning if assertions turned off.
318 i2miMap_.push_back(0);
320 MIIndex = getNextIndex(MIIndex);
// Map this instruction to its index (and back).
323 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
324 assert(inserted && "multiple MachineInstr -> index mappings");
326 i2miMap_.push_back(I);
327 MIIndex = getNextIndex(MIIndex);
330 // Insert max(1, numdefs) empty slots after every instruction.
331 unsigned Slots = I->getDesc().getNumDefs();
335 MIIndex = getNextIndex(MIIndex);
336 i2miMap_.push_back(0);
// Blocks with no terminator still get a terminator gap at the end.
341 if (MBB->getFirstTerminator() == MBB->end()) {
342 // Leave a gap for before terminators, this is where we will point
344 LiveIndex tGap(true, MIIndex);
346 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
348 "Multiple 'first' terminators encountered during numbering.");
349 inserted = inserted; // Avoid compiler warning if assertions turned off.
350 i2miMap_.push_back(0);
352 MIIndex = getNextIndex(MIIndex);
355 // Set the MBB2IdxMap entry for this MBB.
356 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
357 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
360 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If intervals already exist, remap every range endpoint to the new indices.
362 if (!OldI2MI.empty())
363 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
364 for (LiveInterval::iterator LI = OI->second->begin(),
365 LE = OI->second->end(); LI != LE; ++LI) {
367 // Remap the start index of the live range to the corresponding new
368 // number, or our best guess at what it _should_ correspond to if the
369 // original instruction has been erased. This is either the following
370 // instruction or its predecessor.
371 unsigned index = LI->start.getVecIndex();
372 LiveIndex::Slot offset = LI->start.getSlot();
373 if (LI->start.isLoad()) {
374 std::vector<IdxMBBPair>::const_iterator I =
375 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
376 // Take the pair containing the index
377 std::vector<IdxMBBPair>::const_iterator J =
378 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
380 LI->start = getMBBStartIdx(J->second);
382 LI->start = LiveIndex(
383 LiveIndex(mi2iMap_[OldI2MI[index]]),
384 (LiveIndex::Slot)offset);
387 // Remap the ending index in the same way that we remapped the start,
388 // except for the final step where we always map to the immediately
389 // following instruction.
390 index = (getPrevSlot(LI->end)).getVecIndex();
391 offset = LI->end.getSlot();
392 if (LI->end.isLoad()) {
393 // VReg dies at end of block.
394 std::vector<IdxMBBPair>::const_iterator I =
395 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
398 LI->end = getNextSlot(getMBBEndIdx(I->second));
// Skip over erased instructions to find the next surviving one.
400 unsigned idx = index;
401 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
403 if (index != OldI2MI.size())
405 LiveIndex(mi2iMap_[OldI2MI[index]],
406 (idx == index ? offset : LiveIndex::LOAD));
409 LiveIndex(LiveIndex::NUM * i2miMap_.size());
413 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
414 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
417 // Remap the VNInfo def index, which works the same as the
418 // start indices above. VN's with special sentinel defs
419 // don't need to be remapped.
420 if (vni->isDefAccurate() && !vni->isUnused()) {
421 unsigned index = vni->def.getVecIndex();
422 LiveIndex::Slot offset = vni->def.getSlot();
423 if (vni->def.isLoad()) {
424 std::vector<IdxMBBPair>::const_iterator I =
425 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
426 // Take the pair containing the index
427 std::vector<IdxMBBPair>::const_iterator J =
428 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
430 vni->def = getMBBStartIdx(J->second);
432 vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
436 // Remap the VNInfo kill indices, which works the same as
437 // the end indices above.
438 for (size_t i = 0; i < vni->kills.size(); ++i) {
439 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
440 LiveIndex::Slot offset = vni->kills[i].getSlot();
442 if (vni->kills[i].isLoad()) {
// BUG(review): assert on a string literal is always true and never fires;
// this should be assert(false && "Value killed at a load slot.") or
// llvm_unreachable(...).
443 assert("Value killed at a load slot.");
444 /*std::vector<IdxMBBPair>::const_iterator I =
445 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
448 vni->kills[i] = getMBBEndIdx(I->second);*/
// PHI kills are redirected to the owning block's terminator gap.
450 if (vni->kills[i].isPHIIndex()) {
451 std::vector<IdxMBBPair>::const_iterator I =
452 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
454 vni->kills[i] = terminatorGaps[I->second];
456 assert(OldI2MI[index] != 0 &&
457 "Kill refers to instruction not present in index maps.");
458 vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
462 unsigned idx = index;
463 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
465 if (index != OldI2MI.size())
466 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
467 (idx == index ? offset : 0);
469 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// scaleNumbering - Multiply every live index in the analysis by `factor`,
// rebuilding the index->MBB and index->instruction maps to match. Used to
// open up numbering space without renumbering from scratch.
477 void LiveIntervals::scaleNumbering(int factor) {
479 // * scale MBB begin and end points
480 // * scale all ranges.
481 // * Update VNI structures.
482 // * Scale instruction numberings
484 // Scale the MBB indices.
486 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
487 MBB != MBBE; ++MBB) {
488 std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
489 mbbIndices.first = mbbIndices.first.scale(factor);
490 mbbIndices.second = mbbIndices.second.scale(factor);
491 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
493 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
495 // Scale terminator gaps.
496 for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
497 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
499 terminatorGaps[TGI->first] = TGI->second.scale(factor);
502 // Scale the intervals.
503 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
504 LI->second->scaleNumbering(factor);
507 // Scale MachineInstrs.
// Rebuild mi2iMap_ from a copy, tracking the highest slot seen so the
// reverse (index -> instruction) vector can be resized to fit.
508 Mi2IndexMap oldmi2iMap = mi2iMap_;
509 LiveIndex highestSlot;
510 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
512 LiveIndex newSlot = MI->second.scale(factor);
513 mi2iMap_[MI->first] = newSlot;
514 highestSlot = std::max(highestSlot, newSlot);
517 unsigned highestVIndex = highestSlot.getVecIndex();
519 i2miMap_.resize(highestVIndex + 1);
520 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
522 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
528 /// runOnMachineFunction - Register allocate the whole function
// Entry point: cache target/analysis handles, normalize IMPLICIT_DEFs,
// (with elided steps: number instructions, compute intervals), optionally
// perform early coalescing, and update the interval count statistic.
// NOTE(review): mf_ is read on the first visible line but its assignment
// (presumably mf_ = &fn; at original line 531) is elided in this view.
530 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
532 mri_ = &mf_->getRegInfo();
533 tm_ = &fn.getTarget();
534 tri_ = tm_->getRegisterInfo();
535 tii_ = tm_->getInstrInfo();
536 aa_ = &getAnalysis<AliasAnalysis>();
537 lv_ = &getAnalysis<LiveVariables>();
538 allocatableRegs_ = tri_->getAllocatableSet(fn);
540 processImplicitDefs();
543 performEarlyCoalescing();
545 numIntervals += getNumIntervals();
551 /// print - Implement the dump method.
// Prints every computed live interval to OS, one per line.
552 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
553 OS << "********** INTERVALS **********\n";
554 for (const_iterator I = begin(), E = end(); I != E; ++I) {
555 I->second->print(OS, tri_);
// printInstrs - Dump every machine instruction with its live index,
// grouped under the name of its basic block.
562 void LiveIntervals::printInstrs(raw_ostream &OS) const {
563 OS << "********** MACHINEINSTRS **********\n";
565 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
566 mbbi != mbbe; ++mbbi) {
567 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
568 for (MachineBasicBlock::iterator mii = mbbi->begin(),
569 mie = mbbi->end(); mii != mie; ++mii) {
570 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug convenience wrapper (body elided in this view;
// presumably forwards to printInstrs on errs() — confirm against upstream).
575 void LiveIntervals::dumpInstrs() const {
579 /// conflictsWithPhysRegDef - Returns true if the specified register
580 /// is defined during the duration of the specified interval.
// Walks each instruction covered by li's ranges; coalescable copies
// involving li.reg are skipped, and any other operand that maps (directly
// or via the VirtRegMap) to a physreg overlapping `reg` is a conflict.
581 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
582 VirtRegMap &vrm, unsigned reg) {
583 for (LiveInterval::Ranges::const_iterator
584 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
585 for (LiveIndex index = getBaseIndex(I->start),
586 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
587 index = getNextIndex(index)) {
588 // skip deleted instructions
589 while (index != end && !getInstructionFromIndex(index))
590 index = getNextIndex(index);
591 if (index == end) break;
593 MachineInstr *MI = getInstructionFromIndex(index);
594 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// A copy to/from li.reg is not treated as a conflicting def.
595 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
596 if (SrcReg == li.reg || DstReg == li.reg)
598 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
599 MachineOperand& mop = MI->getOperand(i);
602 unsigned PhysReg = mop.getReg();
603 if (PhysReg == 0 || PhysReg == li.reg)
// Translate an assigned virtual register to its physreg before checking.
605 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
606 if (!vrm.hasPhys(PhysReg))
608 PhysReg = vrm.getPhys(PhysReg);
610 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
619 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
620 /// it can check use as well.
// Scans the instructions covered by li's ranges (ignoring members of
// JoinedCopies) for an explicit physreg operand that is a sub-register of
// Reg; uses are only considered when CheckUse is set.
621 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
622 unsigned Reg, bool CheckUse,
623 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
624 for (LiveInterval::Ranges::const_iterator
625 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
626 for (LiveIndex index = getBaseIndex(I->start),
627 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
628 index = getNextIndex(index)) {
629 // Skip deleted instructions.
630 MachineInstr *MI = 0;
631 while (index != end) {
632 MI = getInstructionFromIndex(index);
635 index = getNextIndex(index);
637 if (index == end) break;
// Copies that will be coalesced away cannot conflict.
639 if (JoinedCopies.count(MI))
641 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
642 MachineOperand& MO = MI->getOperand(i);
645 if (MO.isUse() && !CheckUse)
647 unsigned PhysReg = MO.getReg();
648 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
650 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by its target name,
// otherwise print a virtual register as "%reg<N>".
// NOTE(review): the `else` between the two prints is elided in this view
// (numbering jumps 662->664).
660 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
661 if (TargetRegisterInfo::isPhysicalRegister(reg))
662 errs() << tri_->getName(reg);
664 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend/create the live interval for a virtual
// register def at `mi`. First defs build the whole interval from
// LiveVariables kill info; redefs are split into the two-address case and
// the PHI-elimination case.
668 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
669 MachineBasicBlock::iterator mi,
673 LiveInterval &interval) {
675 errs() << "\t\tregister: ";
676 printRegName(interval.reg, tri_);
679 // Virtual registers may be defined multiple times (due to phi
680 // elimination and 2-addr elimination). Much of what we do only has to be
681 // done once for the vreg. We use an empty interval to detect the first
682 // time we see a vreg.
683 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
684 if (interval.empty()) {
685 // Get the Idx of the defining instructions.
686 LiveIndex defIndex = getDefIndex(MIIdx);
687 // Earlyclobbers move back one, so that they overlap the live range
689 if (MO.isEarlyClobber())
690 defIndex = getUseIndex(MIIdx);
// Record the copy instruction (if any) as the value's defining copy.
692 MachineInstr *CopyMI = NULL;
693 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
694 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
695 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
696 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
697 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
699 // Earlyclobbers move back one.
700 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
702 assert(ValNo->id == 0 && "First value in interval is not 0?");
704 // Loop over all of the blocks that the vreg is defined in. There are
705 // two cases we have to handle here. The most common case is a vreg
706 // whose lifetime is contained within a basic block. In this case there
707 // will be a single kill, in MBB, which comes after the definition.
708 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
709 // FIXME: what about dead vars?
711 if (vi.Kills[0] != mi)
712 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
713 else if (MO.isEarlyClobber())
714 // Earlyclobbers that die in this instruction move up one extra, to
715 // compensate for having the starting point moved back one. This
716 // gets them to overlap the live range of other outputs.
717 killIdx = getNextSlot(getNextSlot(defIndex));
719 killIdx = getNextSlot(defIndex);
721 // If the kill happens after the definition, we have an intra-block
723 if (killIdx > defIndex) {
724 assert(vi.AliveBlocks.empty() &&
725 "Shouldn't be alive across any blocks!");
726 LiveRange LR(defIndex, killIdx, ValNo);
727 interval.addRange(LR);
728 DEBUG(errs() << " +" << LR << "\n");
729 ValNo->addKill(killIdx);
734 // The other case we handle is when a virtual register lives to the end
735 // of the defining block, potentially live across some blocks, then is
736 // live into some number of blocks, but gets killed. Start by adding a
737 // range that goes from this definition to the end of the defining block.
738 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
739 DEBUG(errs() << " +" << NewLR);
740 interval.addRange(NewLR);
742 // Iterate over all of the blocks that the variable is completely
743 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
745 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
746 E = vi.AliveBlocks.end(); I != E; ++I) {
747 LiveRange LR(getMBBStartIdx(*I),
748 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
750 interval.addRange(LR);
751 DEBUG(errs() << " +" << LR);
754 // Finally, this virtual register is live from the start of any killing
755 // block to the 'use' slot of the killing instruction.
756 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
757 MachineInstr *Kill = vi.Kills[i];
759 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
760 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
761 interval.addRange(LR);
762 ValNo->addKill(killIdx);
763 DEBUG(errs() << " +" << LR);
767 // If this is the second time we see a virtual register definition, it
768 // must be due to phi elimination or two addr elimination. If this is
769 // the result of two address elimination, then the vreg is one of the
770 // def-and-use register operand.
771 if (mi->isRegTiedToUseOperand(MOIdx)) {
772 // If this is a two-address definition, then we have already processed
773 // the live range. The only problem is that we didn't realize there
774 // are actually two values in the live interval. Because of this we
775 // need to take the LiveRegion that defines this register and split it
777 assert(interval.containsOneValue());
778 LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
779 LiveIndex RedefIndex = getDefIndex(MIIdx);
780 if (MO.isEarlyClobber())
781 RedefIndex = getUseIndex(MIIdx);
783 const LiveRange *OldLR =
784 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
785 VNInfo *OldValNo = OldLR->valno;
787 // Delete the initial value, which should be short and continuous,
788 // because the 2-addr copy must be in the same MBB as the redef.
789 interval.removeRange(DefIndex, RedefIndex);
791 // Two-address vregs should always only be redefined once. This means
792 // that at this point, there should be exactly one value number in it.
793 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
795 // The new value number (#1) is defined by the instruction we claimed
797 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
798 false, // update at *
800 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
802 // Value#0 is now defined by the 2-addr instruction.
803 OldValNo->def = RedefIndex;
804 OldValNo->setCopy(0);
805 if (MO.isEarlyClobber())
806 OldValNo->setHasRedefByEC(true);
808 // Add the new live interval which replaces the range for the input copy.
809 LiveRange LR(DefIndex, RedefIndex, ValNo);
810 DEBUG(errs() << " replace range with " << LR);
811 interval.addRange(LR);
812 ValNo->addKill(RedefIndex);
814 // If this redefinition is dead, we need to add a dummy unit live
815 // range covering the def slot.
818 LiveRange(RedefIndex, MO.isEarlyClobber() ?
819 getNextSlot(getNextSlot(RedefIndex)) :
820 getNextSlot(RedefIndex), OldValNo));
823 errs() << " RESULT: ";
824 interval.print(errs(), tri_);
827 // Otherwise, this must be because of phi elimination. If this is the
828 // first redefinition of the vreg that we have seen, go back and change
829 // the live range in the PHI block to be a different value number.
830 if (interval.containsOneValue()) {
831 // Remove the old range that we now know has an incorrect number.
832 VNInfo *VNI = interval.getValNumInfo(0);
833 MachineInstr *Killer = vi.Kills[0];
834 phiJoinCopies.push_back(Killer);
835 LiveIndex Start = getMBBStartIdx(Killer->getParent());
837 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
839 errs() << " Removing [" << Start << "," << End << "] from: ";
840 interval.print(errs(), tri_);
843 interval.removeRange(Start, End);
844 assert(interval.ranges.size() == 1 &&
845 "Newly discovered PHI interval has >1 ranges.");
846 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
847 VNI->addKill(terminatorGaps[killMBB]);
848 VNI->setHasPHIKill(true);
850 errs() << " RESULT: ";
851 interval.print(errs(), tri_);
854 // Replace the interval with one of a NEW value number. Note that this
855 // value number isn't actually defined by an instruction, weird huh? :)
856 LiveRange LR(Start, End,
857 interval.getNextValue(LiveIndex(mbb->getNumber()),
858 0, false, VNInfoAllocator));
859 LR.valno->setIsPHIDef(true);
860 DEBUG(errs() << " replace range with " << LR);
861 interval.addRange(LR);
862 LR.valno->addKill(End);
864 errs() << " RESULT: ";
865 interval.print(errs(), tri_);
869 // In the case of PHI elimination, each variable definition is only
870 // live until the end of the block. We've already taken care of the
871 // rest of the live range.
872 LiveIndex defIndex = getDefIndex(MIIdx);
873 if (MO.isEarlyClobber())
874 defIndex = getUseIndex(MIIdx);
877 MachineInstr *CopyMI = NULL;
878 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
879 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
880 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
881 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
882 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
884 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// A PHI input value is live from its def to the end of the block, with a
// kill recorded at the block's terminator gap.
886 LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
887 LiveRange LR(defIndex, killIndex, ValNo);
888 interval.addRange(LR);
889 ValNo->addKill(terminatorGaps[mbb]);
890 ValNo->setHasPHIKill(true);
891 DEBUG(errs() << " +" << LR);
895 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Compute the (intra-block) live range for a
// physical register def at `mi`: from the def slot to its kill, redef, or
// dead slot, then add the range to the interval (extending an existing
// value number when one already covers the start).
898 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
899 MachineBasicBlock::iterator mi,
902 LiveInterval &interval,
903 MachineInstr *CopyMI) {
904 // A physical register cannot be live across basic block, so its
905 // lifetime must end somewhere in its defining basic block.
907 errs() << "\t\tregister: ";
908 printRegName(interval.reg, tri_);
911 LiveIndex baseIndex = MIIdx;
912 LiveIndex start = getDefIndex(baseIndex);
913 // Earlyclobbers move back one.
914 if (MO.isEarlyClobber())
915 start = getUseIndex(MIIdx);
916 LiveIndex end = start;
918 // If it is not used after definition, it is considered dead at
919 // the instruction defining it. Hence its interval is:
920 // [defSlot(def), defSlot(def)+1)
921 // For earlyclobbers, the defSlot was pushed back one; the extra
922 // advance below compensates.
924 DEBUG(errs() << " dead");
925 if (MO.isEarlyClobber())
926 end = getNextSlot(getNextSlot(start));
928 end = getNextSlot(start);
932 // If it is not dead on definition, it must be killed by a
933 // subsequent instruction. Hence its interval is:
934 // [defSlot(def), useSlot(kill)+1)
935 baseIndex = getNextIndex(baseIndex);
936 while (++mi != MBB->end()) {
// Keep baseIndex in sync with mi by skipping empty/deleted slots.
937 while (baseIndex.getVecIndex() < i2miMap_.size() &&
938 getInstructionFromIndex(baseIndex) == 0)
939 baseIndex = getNextIndex(baseIndex);
940 if (mi->killsRegister(interval.reg, tri_)) {
941 DEBUG(errs() << " killed");
942 end = getNextSlot(getUseIndex(baseIndex));
945 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
947 if (mi->isRegTiedToUseOperand(DefIdx)) {
948 // Two-address instruction.
949 end = getDefIndex(baseIndex);
950 if (mi->getOperand(DefIdx).isEarlyClobber())
951 end = getUseIndex(baseIndex);
953 // Another instruction redefines the register before it is ever read.
954 // Then the register is essentially dead at the instruction that defines
955 // it. Hence its interval is:
956 // [defSlot(def), defSlot(def)+1)
957 DEBUG(errs() << " dead");
958 end = getNextSlot(start);
964 baseIndex = getNextIndex(baseIndex);
967 // The only case we should have a dead physreg here without a killing or
968 // instruction where we know it's dead is if it is live-in to the function
969 // and never used. Another possible case is the implicit use of the
970 // physical register has been deleted by two-address pass.
971 end = getNextSlot(start);
974 assert(start < end && "did not find end of interval?");
976 // Already exists? Extend old live interval.
977 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
978 bool Extend = OldLR != interval.end();
979 VNInfo *ValNo = Extend
980 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
981 if (MO.isEarlyClobber() && Extend)
982 ValNo->setHasRedefByEC(true);
983 LiveRange LR(start, end, ValNo);
984 interval.addRange(LR);
985 LR.valno->addKill(end);
986 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler; for an allocatable physreg, also process each
// of its sub-registers (with no CopyMI, since the copy defines the
// super-register, not the sub-register).
989 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
990 MachineBasicBlock::iterator MI,
994 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
995 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
996 getOrCreateInterval(MO.getReg()));
997 else if (allocatableRegs_[MO.getReg()]) {
// Identify copy-like defs so the value number can remember its source.
998 MachineInstr *CopyMI = NULL;
999 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1000 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
1001 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1002 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
1003 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1005 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1006 getOrCreateInterval(MO.getReg()), CopyMI);
1007 // Def of a register also defines its sub-registers.
1008 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1009 // If MI also modifies the sub-register explicitly, avoid processing it
1010 // more than once. Do not pass in TRI here so it checks for exact match.
1011 if (!MI->modifiesRegister(*AS))
1012 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1013 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is live
// into MBB: from the block start to its first kill or redefinition (or, if
// never referenced, treat it as dead on entry / live-through per the flags
// visible below). The value number is marked as a PHI-style block-entry def.
1017 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1019 LiveInterval &interval, bool isAlias) {
1021 errs() << "\t\tlivein register: ";
1022 printRegName(interval.reg, tri_);
1025 // Look for kills, if it reaches a def before it's killed, then it shouldn't
1026 // be considered a livein.
1027 MachineBasicBlock::iterator mi = MBB->begin();
1028 LiveIndex baseIndex = MIIdx;
1029 LiveIndex start = baseIndex;
// Advance past empty slots to the block's first real instruction.
1030 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1031 getInstructionFromIndex(baseIndex) == 0)
1032 baseIndex = getNextIndex(baseIndex);
1033 LiveIndex end = baseIndex;
1034 bool SeenDefUse = false;
1036 while (mi != MBB->end()) {
1037 if (mi->killsRegister(interval.reg, tri_)) {
1038 DEBUG(errs() << " killed");
1039 end = getNextSlot(getUseIndex(baseIndex));
1042 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1043 // Another instruction redefines the register before it is ever read.
1044 // Then the register is essentially dead at the instruction that defines
1045 // it. Hence its interval is:
1046 // [defSlot(def), defSlot(def)+1)
1047 DEBUG(errs() << " dead");
1048 end = getNextSlot(getDefIndex(start));
1053 baseIndex = getNextIndex(baseIndex);
1055 if (mi != MBB->end()) {
1056 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1057 getInstructionFromIndex(baseIndex) == 0)
1058 baseIndex = getNextIndex(baseIndex);
1062 // Live-in register might not be used at all.
1065 DEBUG(errs() << " dead");
1066 end = getNextSlot(getDefIndex(MIIdx));
1068 DEBUG(errs() << " live through");
// Create the block-entry value number and install the computed range.
1074 interval.getNextValue(LiveIndex(MBB->getNumber()),
1075 0, false, VNInfoAllocator);
1076 vni->setIsPHIDef(true);
1077 LiveRange LR(start, end, vni);
1079 interval.addRange(LR);
1080 LR.valno->addKill(end);
1081 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Classify all copy defs of SrcInt into identity
// copies (source is DstInt.reg) and other copies, and decide whether early
// coalescing the phi join is worthwhile (more identity than other copies).
// Returns false when a conflict is detected, leaving the decision to the
// regular coalescer.  NOTE(review): the return-type line and some interior
// lines (e.g. the non-move early-out) are missing from this excerpt.
1085 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1086 SmallVector<MachineInstr*,16> &IdentCopies,
1087 SmallVector<MachineInstr*,16> &OtherCopies) {
1088 bool HaveConflict = false;
1089 unsigned NumIdent = 0;
1090 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
1091 re = mri_->def_end(); ri != re; ++ri) {
1092 MachineInstr *MI = &*ri;
1093 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1094 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
// A copy from some register other than DstInt.reg: record it, and note a
// conflict if DstInt is already live at that copy.
1096 if (SrcReg != DstInt.reg) {
1097 OtherCopies.push_back(MI);
1098 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1100 IdentCopies.push_back(MI);
1106 return false; // Let coalescer handle it
1107 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - Eliminate phi-join copies ahead of the regular
// coalescer: redirect non-identity copies to define the phi destination
// directly, erase would-be identity copies, then remove the phi join itself
// and transfer its source range to the destination interval.
// NOTE(review): this excerpt omits some lines (continue/break statements,
// closing braces); the visible statement order is preserved verbatim.
1110 void LiveIntervals::performEarlyCoalescing() {
1111 if (!EarlyCoalescing)
1114 /// Perform early coalescing: eliminate copies which feed into phi joins
1115 /// and whose sources are defined by the phi joins.
1116 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1117 MachineInstr *Join = phiJoinCopies[i];
// Honor the debugging limit on how many joins may be coalesced.
1118 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1121 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1122 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1124 assert(isMove && "PHI join instruction must be a move!");
1129 LiveInterval &DstInt = getInterval(PHIDst);
1130 LiveInterval &SrcInt = getInterval(PHISrc);
1131 SmallVector<MachineInstr*, 16> IdentCopies;
1132 SmallVector<MachineInstr*, 16> OtherCopies;
1133 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1136 DEBUG(errs() << "PHI Join: " << *Join);
1137 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1138 VNInfo *VNI = DstInt.getValNumInfo(0);
1140 // Change the non-identity copies to directly target the phi destination.
1141 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1142 MachineInstr *PHICopy = OtherCopies[i];
1143 DEBUG(errs() << "Moving: " << *PHICopy);
1145 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1146 LiveIndex DefIndex = getDefIndex(MIIndex);
1147 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1148 LiveIndex StartIndex = SLR->start;
1149 LiveIndex EndIndex = SLR->end;
1151 // Delete val# defined by the now identity copy and add the range from
1152 // beginning of the mbb to the end of the range.
1153 SrcInt.removeValNo(SLR->valno);
1154 DEBUG(errs() << " added range [" << StartIndex << ','
1155 << EndIndex << "] to reg" << DstInt.reg << '\n');
1156 if (DstInt.liveAt(StartIndex))
1157 DstInt.removeRange(StartIndex, EndIndex);
1158 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1160 NewVNI->setHasPHIKill(true);
1161 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's PHISrc operands to the phi destination register
// (the rewrite itself is on lines omitted from this excerpt).
1162 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1163 MachineOperand &MO = PHICopy->getOperand(j);
1164 if (!MO.isReg() || MO.getReg() != PHISrc)
1170 // Now let's eliminate all the would-be identity copies.
1171 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1172 MachineInstr *PHICopy = IdentCopies[i];
1173 DEBUG(errs() << "Coalescing: " << *PHICopy);
1175 LiveIndex MIIndex = getInstructionIndex(PHICopy);
1176 LiveIndex DefIndex = getDefIndex(MIIndex);
1177 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1178 LiveIndex StartIndex = SLR->start;
1179 LiveIndex EndIndex = SLR->end;
1181 // Delete val# defined by the now identity copy and add the range from
1182 // beginning of the mbb to the end of the range.
1183 SrcInt.removeValNo(SLR->valno);
1184 RemoveMachineInstrFromMaps(PHICopy);
1185 PHICopy->eraseFromParent();
1186 DEBUG(errs() << " added range [" << StartIndex << ','
1187 << EndIndex << "] to reg" << DstInt.reg << '\n');
1188 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1191 // Remove the phi join and update the phi block liveness.
1192 LiveIndex MIIndex = getInstructionIndex(Join);
1193 LiveIndex UseIndex = getUseIndex(MIIndex);
1194 LiveIndex DefIndex = getDefIndex(MIIndex);
1195 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1196 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The dest val# is no longer defined by a copy once the join is removed.
1197 DLR->valno->setCopy(0);
1198 DLR->valno->setIsDefAccurate(false);
1199 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1200 SrcInt.removeRange(SLR->start, SLR->end);
1201 assert(SrcInt.empty());
1202 removeInterval(PHISrc);
1203 RemoveMachineInstrFromMaps(Join);
1204 Join->eraseFromParent();
1210 /// computeIntervals - computes the live intervals for virtual
1211 /// registers. for some ordering of the machine instructions [1,N] a
1212 /// live interval is an interval [i, j) where 1 <= i <= j < N for
1213 /// which a variable is live
// Walks every basic block: first builds intervals for block live-ins, then
// processes each instruction's register defs.  Registers only ever used as
// <undef> get empty intervals at the end.
// NOTE(review): this excerpt omits some lines (continue statements, the
// isDef() test before handleRegisterDef, index-advance arithmetic).
1214 void LiveIntervals::computeIntervals() {
1215 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1216 << "********** Function: "
1217 << ((Value*)mf_->getFunction())->getName() << '\n');
1219 SmallVector<unsigned, 8> UndefUses;
1220 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1221 MBBI != E; ++MBBI) {
1222 MachineBasicBlock *MBB = MBBI;
1223 // Track the index of the current machine instr.
1224 LiveIndex MIIndex = getMBBStartIdx(MBB);
1225 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1227 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1229 // Create intervals for live-ins to this BB first.
1230 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1231 LE = MBB->livein_end(); LI != LE; ++LI) {
1232 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1233 // Multiple live-ins can alias the same register.
1234 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1235 if (!hasInterval(*AS))
1236 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1240 // Skip over empty initial indices.
1241 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1242 getInstructionFromIndex(MIIndex) == 0)
1243 MIIndex = getNextIndex(MIIndex);
1245 for (; MI != miEnd; ++MI) {
1246 DEBUG(errs() << MIIndex << "\t" << *MI);
// Operands are visited in reverse so defs are seen after uses of the
// same instruction.
1249 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1250 MachineOperand &MO = MI->getOperand(i);
1251 if (!MO.isReg() || !MO.getReg())
1254 // handle register defs - build intervals
1256 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1257 else if (MO.isUndef())
1258 UndefUses.push_back(MO.getReg());
1261 // Skip over the empty slots after each instruction.
1262 unsigned Slots = MI->getDesc().getNumDefs();
1267 MIIndex = getNextIndex(MIIndex);
1269 // Skip over empty indices.
1270 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1271 getInstructionFromIndex(MIIndex) == 0)
1272 MIIndex = getNextIndex(MIIndex);
1276 // Create empty intervals for registers defined by implicit_def's (except
1277 // for those implicit_def that define values which are liveout of their
1279 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1280 unsigned UndefReg = UndefUses[i];
1281 (void)getOrCreateInterval(UndefReg);
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside [Start, End).  Uses binary search on the sorted Idx2MBBMap.
// NOTE(review): the loop tail (iterator increment, ResVal update, return)
// is on lines omitted from this excerpt.
1285 bool LiveIntervals::findLiveInMBBs(
1286 LiveIndex Start, LiveIndex End,
1287 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1288 std::vector<IdxMBBPair>::const_iterator I =
1289 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1291 bool ResVal = false;
1292 while (I != Idx2MBBMap.end()) {
1293 if (I->first >= End)
1295 MBBs.push_back(I->second);
// findReachableMBBs - For blocks starting within [Start, End), collect their
// successor blocks into MBBs.  Like findLiveInMBBs, but gathers successors
// rather than the blocks themselves.
// NOTE(review): the loop tail and return are on lines omitted here.
1302 bool LiveIntervals::findReachableMBBs(
1303 LiveIndex Start, LiveIndex End,
1304 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1305 std::vector<IdxMBBPair>::const_iterator I =
1306 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1308 bool ResVal = false;
1309 while (I != Idx2MBBMap.end()) {
1312 MachineBasicBlock *MBB = I->second;
// Stop once a block extends past the requested end index.
1313 if (getMBBEndIdx(MBB) > End)
1315 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1316 SE = MBB->succ_end(); SI != SE; ++SI)
1317 MBBs.push_back(*SI);
// createInterval - Allocate a new LiveInterval for reg.  Physical registers
// get an infinite spill weight (they can never be spilled); virtual
// registers start at weight zero.
1324 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1325 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1326 return new LiveInterval(reg, Weight);
1329 /// dupInterval - Duplicate a live interval. The caller is responsible for
1330 /// managing the allocated memory.
// Creates a fresh interval for the same register and copies li's contents
// into it (the return of NewLI is on a line omitted from this excerpt).
1331 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1332 LiveInterval *NewLI = createInterval(li->reg);
1333 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1337 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1338 /// copy field and returns the source register that defines it.
// Returns 0 when the val# has no recorded copy; otherwise decodes the copy
// instruction by opcode.  NOTE(review): some return statements fall on lines
// omitted from this excerpt.
1339 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1340 if (!VNI->getCopy())
1343 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1344 // If it's extracting out of a physical register, return the sub-register.
1345 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1346 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1347 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG take their inserted value as operand 2.
1349 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1350 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1351 return VNI->getCopy()->getOperand(2).getReg();
1353 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1354 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1356 llvm_unreachable("Unrecognized copy instruction!");
1360 //===----------------------------------------------------------------------===//
1361 // Register allocator hooks.
1364 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1365 /// allow one) virtual register operand, then its uses are implicitly using
1366 /// the register. Returns the virtual register.
// Scans MI's use operands for a single non-li.reg register; asserts if more
// than one is found.  NOTE(review): RegOp's declaration, several continue
// statements, and the return are on lines omitted from this excerpt.
1367 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1368 MachineInstr *MI) const {
1370 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1371 MachineOperand &MO = MI->getOperand(i);
1372 if (!MO.isReg() || !MO.isUse())
1374 unsigned Reg = MO.getReg();
// Skip the register being rematerialized itself.
1375 if (Reg == 0 || Reg == li.reg)
// Unallocatable physical register uses (e.g. reserved regs) are ignored.
1378 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1379 !allocatableRegs_[Reg])
1381 // FIXME: For now, only remat MI with at most one register operand.
1383 "Can't rematerialize instruction with multiple register operand!");
1384 RegOp = MO.getReg();
1392 /// isValNoAvailableAt - Return true if the val# of the specified interval
1393 /// which reaches the given instruction also reaches the specified use index.
1394 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1395 LiveIndex UseIdx) const {
// Find the val# live at MI's own index...
1396 LiveIndex Index = getInstructionIndex(MI);
1397 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...and check the same val# is the one live at UseIdx.
1398 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1399 return UI != li.end() && UI->valno == ValNo;
1402 /// isReMaterializable - Returns true if the definition MI of the specified
1403 /// val# of the specified interval is re-materializable.
// Fast paths: IMPLICIT_DEF and loads from immutable stack slots are always
// rematable.  Otherwise defer to the target's triviality check, and - when
// aggressive remat is enabled - analyze memoperands and register operands
// for constancy.  NOTE(review): many early-return lines are omitted from
// this excerpt; the visible conditions' outcomes cannot all be confirmed.
1404 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1405 const VNInfo *ValNo, MachineInstr *MI,
1406 SmallVectorImpl<LiveInterval*> &SpillIs,
1411 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1415 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1416 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1417 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
1418 // this but remember this is not safe to fold into a two-address
1420 // This is a load from fixed stack slot. It can be rematerialized.
1423 // If the target-specific rules don't identify an instruction as
1424 // being trivially rematerializable, use some target-independent
1426 if (!MI->getDesc().isRematerializable() ||
1427 !tii_->isTriviallyReMaterializable(MI)) {
1428 if (!EnableAggressiveRemat)
1431 // If the instruction accesses memory but the memoperands have been lost,
1432 // we can't analyze it.
1433 const TargetInstrDesc &TID = MI->getDesc();
1434 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1437 // Avoid instructions obviously unsafe for remat.
1438 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1441 // If the instruction accesses memory and the memory could be non-constant,
1442 // assume the instruction is not rematerializable.
1443 for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
1444 E = MI->memoperands_end(); I != E; ++I){
1445 const MachineMemOperand *MMO = *I;
1446 if (MMO->isVolatile() || MMO->isStore())
1448 const Value *V = MMO->getValue();
// Pseudo source values (constant pool, fixed stack, etc.) have their own
// constancy query; real IR values go through alias analysis.
1451 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1452 if (!PSV->isConstant(mf_->getFrameInfo()))
1454 } else if (!aa_->pointsToConstantMemory(V))
1458 // If any of the registers accessed are non-constant, conservatively assume
1459 // the instruction is not rematerializable.
1460 unsigned ImpUse = 0;
1461 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1462 const MachineOperand &MO = MI->getOperand(i);
1464 unsigned Reg = MO.getReg();
1467 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1470 // Only allow one def, and that in the first operand.
1471 if (MO.isDef() != (i == 0))
1474 // Only allow constant-valued registers.
1475 bool IsLiveIn = mri_->isLiveIn(Reg);
1476 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1477 E = mri_->def_end();
1479 // For the def, it should be the only def of that register.
1480 if (MO.isDef() && (next(I) != E || IsLiveIn))
1484 // Only allow one use other register use, as that's all the
1485 // remat mechanisms support currently.
1486 if (Reg != li.reg) {
1489 else if (Reg != ImpUse)
1492 // For the use, there should be only one associated def.
1493 if (I != E && (next(I) != E || IsLiveIn))
// If remat depends on an implicit virtual-register use, that register's
// value must also be available at every use of li.
1500 unsigned ImpUse = getReMatImplicitUse(li, MI);
1502 const LiveInterval &ImpLi = getInterval(ImpUse);
1503 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1504 re = mri_->use_end(); ri != re; ++ri) {
1505 MachineInstr *UseMI = &*ri;
1506 LiveIndex UseIdx = getInstructionIndex(UseMI);
1507 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1509 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1513 // If a register operand of the re-materialized instruction is going to
1514 // be spilled next, then it's not legal to re-materialize this instruction.
1515 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1516 if (ImpUse == SpillIs[i]->reg)
1522 /// isReMaterializable - Returns true if the definition MI of the specified
1523 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with dummy SpillIs /
// isLoad arguments (Dummy2's declaration is on an omitted line).
1524 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1525 const VNInfo *ValNo, MachineInstr *MI) {
1526 SmallVector<LiveInterval*, 4> Dummy1;
1528 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1531 /// isReMaterializable - Returns true if every definition of MI of every
1532 /// val# of the specified interval is re-materializable.
// Iterates all val#s of li; each live (non-unused) val# must have an
// accurate def and a rematable defining instruction.  Accumulates whether
// any def is a load into isLoad.  NOTE(review): the isLoad out-parameter and
// some return lines are on lines omitted from this excerpt.
1533 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1534 SmallVectorImpl<LiveInterval*> &SpillIs,
1537 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1539 const VNInfo *VNI = *i;
1540 if (VNI->isUnused())
1541 continue; // Dead val#.
1542 // Is the def for the val# rematerializable?
1543 if (!VNI->isDefAccurate())
1545 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1546 bool DefIsLoad = false;
1548 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1550 isLoad |= DefIsLoad;
1555 /// FilterFoldedOps - Filter out two-address use operands. Return
1556 /// true if it finds any issue with the operands that ought to prevent
// Accumulates the mod/ref classification of the foldable operands into
// MRInfo and collects the operand indexes that may actually be folded into
// FoldOps.  NOTE(review): the MRInfo reference parameter and the returns
// are on lines omitted from this excerpt.
1558 static bool FilterFoldedOps(MachineInstr *MI,
1559 SmallVector<unsigned, 2> &Ops,
1561 SmallVector<unsigned, 2> &FoldOps) {
1563 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1564 unsigned OpIdx = Ops[i];
1565 MachineOperand &MO = MI->getOperand(OpIdx);
1566 // FIXME: fold subreg use.
1570 MRInfo |= (unsigned)VirtRegMap::isMod;
1572 // Filter out two-address use operand(s).
1573 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A use tied to a def is both modified and referenced.
1574 MRInfo = VirtRegMap::isModRef;
1577 MRInfo |= (unsigned)VirtRegMap::isRef;
1579 FoldOps.push_back(OpIdx);
1585 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1586 /// slot / to reg or any rematerialized load into ith operand of specified
1587 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the folded instruction replaces MI in the block and in the
// index maps, and VirtRegMap bookkeeping (spill points, restore points,
// emergency spills) is transferred to it.
// NOTE(review): some lines are omitted (returns, the null check on fmi, the
// InstrIdx computation), so failure paths are not fully visible here.
1589 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1590 VirtRegMap &vrm, MachineInstr *DefMI,
1592 SmallVector<unsigned, 2> &Ops,
1593 bool isSS, int Slot, unsigned Reg) {
1594 // If it is an implicit def instruction, just delete it.
1595 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1596 RemoveMachineInstrFromMaps(MI);
1597 vrm.RemoveMachineInstrFromMaps(MI);
1598 MI->eraseFromParent();
1603 // Filter the list of operand indexes that are to be folded. Abort if
1604 // any operand will prevent folding.
1605 unsigned MRInfo = 0;
1606 SmallVector<unsigned, 2> FoldOps;
1607 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1610 // The only time it's safe to fold into a two address instruction is when
1611 // it's folding reload and spill from / into a spill stack slot.
1612 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either a stack-slot reference or the remat def instruction itself.
1615 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1616 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1618 // Remember this instruction uses the spill slot.
1619 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1621 // Attempt to fold the memory reference into the instruction. If
1622 // we can do this, we don't need to insert spill code.
1623 MachineBasicBlock &MBB = *MI->getParent();
1624 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1625 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1626 vrm.transferSpillPts(MI, fmi);
1627 vrm.transferRestorePts(MI, fmi);
1628 vrm.transferEmergencySpills(MI, fmi);
// Rebind the old instruction's index to the folded instruction.
1630 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1631 mi2iMap_[fmi] = InstrIdx;
1632 MI = MBB.insert(MBB.erase(MI), fmi);
1639 /// canFoldMemoryOperand - Returns true if the specified load / store
1640 /// folding is possible.
// Same operand filtering as tryFoldMemoryOperand, but only queries the
// target without mutating anything (the ReMat parameter line is omitted
// from this excerpt).
1641 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1642 SmallVector<unsigned, 2> &Ops,
1644 // Filter the list of operand indexes that are to be folded. Abort if
1645 // any operand will prevent folding.
1646 unsigned MRInfo = 0;
1647 SmallVector<unsigned, 2> FoldOps;
1648 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1651 // It's only legal to remat for a use, not a def.
1652 if (ReMat && (MRInfo & VirtRegMap::isMod))
1655 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all ranges of li lie within a single
// basic block.  Each range start is mapped to its block via binary search;
// a range crossing a block boundary, or a second distinct block, disproves
// it.  NOTE(review): return statements fall on lines omitted here.
1658 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1659 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1660 for (LiveInterval::Ranges::const_iterator
1661 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1662 std::vector<IdxMBBPair>::const_iterator II =
1663 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1664 if (II == Idx2MBBMap.end())
1666 if (I->end > II->first) // crossing a MBB.
1668 MBBs.insert(II->second);
1669 if (MBBs.size() > 1)
1675 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1676 /// interval on to-be re-materialized operands of MI) with new register.
1677 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1678 MachineInstr *MI, unsigned NewVReg,
1680 // There is an implicit use. That means one of the other operand is
1681 // being remat'ed and the remat'ed instruction has li.reg as an
1682 // use operand. Make sure we rewrite that as well.
1683 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1684 MachineOperand &MO = MI->getOperand(i);
1687 unsigned Reg = MO.getReg();
1688 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1690 if (!vrm.isReMaterialized(Reg))
// Rewrite li.reg inside the recorded remat instruction to the new vreg
// (the null check on UseMO is on a line omitted from this excerpt).
1692 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1693 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1695 UseMO->setReg(NewVReg);
1699 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1700 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands from li.reg to a (possibly new) vreg,
// attempting fold/remat/delete along the way, then creates the spill
// interval's live ranges for the rewritten def/use.  Returns whether all
// visited operands could be folded.
// NOTE(review): this excerpt omits many lines (continue statements, else
// branches, the RestartInstruction label, closing braces); the visible
// statements are reproduced verbatim and in order.
1701 bool LiveIntervals::
1702 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1703 bool TrySplit, LiveIndex index, LiveIndex end,
1705 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1706 unsigned Slot, int LdSlot,
1707 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1709 const TargetRegisterClass* rc,
1710 SmallVector<int, 4> &ReMatIds,
1711 const MachineLoopInfo *loopInfo,
1712 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1713 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1714 std::vector<LiveInterval*> &NewLIs) {
1715 bool CanFold = false;
1717 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1718 MachineOperand& mop = MI->getOperand(i);
1721 unsigned Reg = mop.getReg();
1722 unsigned RegI = Reg;
1723 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1728 bool TryFold = !DefIsReMat;
1729 bool FoldSS = true; // Default behavior unless it's a remat.
1730 int FoldSlot = Slot;
1732 // If this is the rematerializable definition MI itself and
1733 // all of its uses are rematerialized, simply delete it.
1734 if (MI == ReMatOrigDefMI && CanDelete) {
1735 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1737 RemoveMachineInstrFromMaps(MI);
1738 vrm.RemoveMachineInstrFromMaps(MI);
1739 MI->eraseFromParent();
1743 // If def for this use can't be rematerialized, then try folding.
1744 // If def is rematerializable and it's a load, also try folding.
1745 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1747 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1753 // Scan all of the operands of this instruction rewriting operands
1754 // to use NewVReg instead of li.reg as appropriate. We do this for
1757 // 1. If the instr reads the same spilled vreg multiple times, we
1758 // want to reuse the NewVReg.
1759 // 2. If the instr is a two-addr instruction, we are required to
1760 // keep the src/dst regs pinned.
1762 // Keep track of whether we replace a use and/or def so that we can
1763 // create the spill interval with the appropriate range.
1765 HasUse = mop.isUse();
1766 HasDef = mop.isDef();
1767 SmallVector<unsigned, 2> Ops;
// Collect the indexes of every other operand referring to the same reg.
1769 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1770 const MachineOperand &MOj = MI->getOperand(j);
1773 unsigned RegJ = MOj.getReg();
1774 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1778 if (!MOj.isUndef()) {
1779 HasUse |= MOj.isUse();
1780 HasDef |= MOj.isDef();
1785 // Create a new virtual register for the spill interval.
1786 // Create the new register now so we can map the fold instruction
1787 // to the new register so when it is unfolded we get the correct
1789 bool CreatedNewVReg = false;
1791 NewVReg = mri_->createVirtualRegister(rc);
1793 CreatedNewVReg = true;
1799 // Do not fold load / store here if we are splitting. We'll find an
1800 // optimal point to insert a load / store later.
1802 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1803 Ops, FoldSS, FoldSlot, NewVReg)) {
1804 // Folding the load/store can completely change the instruction in
1805 // unpredictable ways, rescan it from the beginning.
1808 // We need to give the new vreg the same stack slot as the
1809 // spilled interval.
1810 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1816 if (isNotInMIMap(MI))
1818 goto RestartInstruction;
1821 // We'll try to fold it later if it's profitable.
1822 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1826 mop.setReg(NewVReg);
1827 if (mop.isImplicit())
1828 rewriteImplicitOps(li, MI, NewVReg, vrm);
1830 // Reuse NewVReg for other reads.
1831 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1832 MachineOperand &mopj = MI->getOperand(Ops[j]);
1833 mopj.setReg(NewVReg);
1834 if (mopj.isImplicit())
1835 rewriteImplicitOps(li, MI, NewVReg, vrm);
1838 if (CreatedNewVReg) {
1840 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1841 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1842 // Each valnum may have its own remat id.
1843 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1845 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1847 if (!CanDelete || (HasUse && HasDef)) {
1848 // If this is a two-addr instruction then its use operands are
1849 // rematerializable but its def is not. It should be assigned a
1851 vrm.assignVirt2StackSlot(NewVReg, Slot);
1854 vrm.assignVirt2StackSlot(NewVReg, Slot);
1856 } else if (HasUse && HasDef &&
1857 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1858 // If this interval hasn't been assigned a stack slot (because earlier
1859 // def is a deleted remat def), do it now.
1860 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1861 vrm.assignVirt2StackSlot(NewVReg, Slot);
1864 // Re-matting an instruction with virtual register use. Add the
1865 // register as an implicit use on the use MI.
1866 if (DefIsReMat && ImpUse)
1867 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1869 // Create a new register interval for this spill / remat.
1870 LiveInterval &nI = getOrCreateInterval(NewVReg);
1871 if (CreatedNewVReg) {
1872 NewLIs.push_back(&nI);
1873 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1875 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the live ranges of the spill interval around this instruction:
// a use range from the load slot past the use slot, or an extension of
// the split interval, plus a def range up to the store slot.
1879 if (CreatedNewVReg) {
1880 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1881 nI.getNextValue(LiveIndex(), 0, false,
1883 DEBUG(errs() << " +" << LR);
1886 // Extend the split live interval to this def / use.
1887 LiveIndex End = getNextSlot(getUseIndex(index));
1888 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1889 nI.getValNumInfo(nI.getNumValNums()-1));
1890 DEBUG(errs() << " +" << LR);
1895 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1896 nI.getNextValue(LiveIndex(), 0, false,
1898 DEBUG(errs() << " +" << LR);
1903 errs() << "\t\t\t\tAdded new interval: ";
1904 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if VNI has a (non-PHI) kill inside MBB
// strictly after Idx and before the block's end index.
// NOTE(review): the VNInfo parameter line and the return statements are on
// lines omitted from this excerpt.
1910 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1912 MachineBasicBlock *MBB,
1913 LiveIndex Idx) const {
1914 LiveIndex End = getMBBEndIdx(MBB);
1915 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are block-boundary markers, not real kill points.
1916 if (VNI->kills[j].isPHIIndex())
1919 LiveIndex KillIdx = VNI->kills[j];
1920 if (KillIdx > Idx && KillIdx < End)
1926 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1927 /// during spilling.
// A (index, instruction, has-use, has-def) record; the member declarations
// are on lines omitted from this excerpt.
1929 struct RewriteInfo {
1934 RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
1935 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict-weak ordering of RewriteInfo records by their
// instruction index, used to sort the rewrite worklist.
1938 struct RewriteInfoCompare {
1939 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1940 return LHS.Index < RHS.Index;
1945 void LiveIntervals::
1946 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1947 LiveInterval::Ranges::const_iterator &I,
1948 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1949 unsigned Slot, int LdSlot,
1950 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1952 const TargetRegisterClass* rc,
1953 SmallVector<int, 4> &ReMatIds,
1954 const MachineLoopInfo *loopInfo,
1955 BitVector &SpillMBBs,
1956 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1957 BitVector &RestoreMBBs,
1958 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1959 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1960 std::vector<LiveInterval*> &NewLIs) {
1961 bool AllCanFold = true;
1962 unsigned NewVReg = 0;
1963 LiveIndex start = getBaseIndex(I->start);
1964 LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1966 // First collect all the def / use in this live range that will be rewritten.
1967 // Make sure they are sorted according to instruction index.
1968 std::vector<RewriteInfo> RewriteMIs;
1969 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1970 re = mri_->reg_end(); ri != re; ) {
1971 MachineInstr *MI = &*ri;
1972 MachineOperand &O = ri.getOperand();
1974 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1975 LiveIndex index = getInstructionIndex(MI);
1976 if (index < start || index >= end)
1980 // Must be defined by an implicit def. It should not be spilled. Note,
1981 // this is for correctness reason. e.g.
1982 // 8 %reg1024<def> = IMPLICIT_DEF
1983 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1984 // The live range [12, 14) are not part of the r1024 live interval since
1985 // it's defined by an implicit def. It will not conflicts with live
1986 // interval of r1025. Now suppose both registers are spilled, you can
1987 // easily see a situation where both registers are reloaded before
1988 // the INSERT_SUBREG and both target registers that would overlap.
1990 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1992 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1994 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1995 // Now rewrite the defs and uses.
1996 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1997 RewriteInfo &rwi = RewriteMIs[i];
1999 LiveIndex index = rwi.Index;
2000 bool MIHasUse = rwi.HasUse;
2001 bool MIHasDef = rwi.HasDef;
2002 MachineInstr *MI = rwi.MI;
2003 // If MI def and/or use the same register multiple times, then there
2004 // are multiple entries.
2005 unsigned NumUses = MIHasUse;
2006 while (i != e && RewriteMIs[i].MI == MI) {
2007 assert(RewriteMIs[i].Index == index);
2008 bool isUse = RewriteMIs[i].HasUse;
2009 if (isUse) ++NumUses;
2011 MIHasDef |= RewriteMIs[i].HasDef;
2014 MachineBasicBlock *MBB = MI->getParent();
2016 if (ImpUse && MI != ReMatDefMI) {
2017 // Re-matting an instruction with virtual register use. Update the
2018 // register interval's spill weight to HUGE_VALF to prevent it from
2020 LiveInterval &ImpLi = getInterval(ImpUse);
2021 ImpLi.weight = HUGE_VALF;
2024 unsigned MBBId = MBB->getNumber();
2025 unsigned ThisVReg = 0;
2027 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2028 if (NVI != MBBVRegsMap.end()) {
2029 ThisVReg = NVI->second;
2036 // It's better to start a new interval to avoid artifically
2037 // extend the new interval.
2038 if (MIHasDef && !MIHasUse) {
2039 MBBVRegsMap.erase(MBB->getNumber());
2045 bool IsNew = ThisVReg == 0;
2047 // This ends the previous live interval. If all of its def / use
2048 // can be folded, give it a low spill weight.
2049 if (NewVReg && TrySplit && AllCanFold) {
2050 LiveInterval &nI = getOrCreateInterval(NewVReg);
2057 bool HasDef = false;
2058 bool HasUse = false;
2059 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2060 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2061 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2062 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2063 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2064 if (!HasDef && !HasUse)
2067 AllCanFold &= CanFold;
2069 // Update weight of spill interval.
2070 LiveInterval &nI = getOrCreateInterval(NewVReg);
2072 // The spill weight is now infinity as it cannot be spilled again.
2073 nI.weight = HUGE_VALF;
2077 // Keep track of the last def and first use in each MBB.
2079 if (MI != ReMatOrigDefMI || !CanDelete) {
2080 bool HasKill = false;
2082 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2084 // If this is a two-address code, then this index starts a new VNInfo.
2085 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2087 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2089 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2090 SpillIdxes.find(MBBId);
2092 if (SII == SpillIdxes.end()) {
2093 std::vector<SRInfo> S;
2094 S.push_back(SRInfo(index, NewVReg, true));
2095 SpillIdxes.insert(std::make_pair(MBBId, S));
2096 } else if (SII->second.back().vreg != NewVReg) {
2097 SII->second.push_back(SRInfo(index, NewVReg, true));
2098 } else if (index > SII->second.back().index) {
2099 // If there is an earlier def and this is a two-address
2100 // instruction, then it's not possible to fold the store (which
2101 // would also fold the load).
2102 SRInfo &Info = SII->second.back();
2104 Info.canFold = !HasUse;
2106 SpillMBBs.set(MBBId);
2107 } else if (SII != SpillIdxes.end() &&
2108 SII->second.back().vreg == NewVReg &&
2109 index > SII->second.back().index) {
2110 // There is an earlier def that's not killed (must be two-address).
2111 // The spill is no longer needed.
2112 SII->second.pop_back();
2113 if (SII->second.empty()) {
2114 SpillIdxes.erase(MBBId);
2115 SpillMBBs.reset(MBBId);
2122 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2123 SpillIdxes.find(MBBId);
2124 if (SII != SpillIdxes.end() &&
2125 SII->second.back().vreg == NewVReg &&
2126 index > SII->second.back().index)
2127 // Use(s) following the last def, it's not safe to fold the spill.
2128 SII->second.back().canFold = false;
2129 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2130 RestoreIdxes.find(MBBId);
2131 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2132 // If we are splitting live intervals, only fold if it's the first
2133 // use and there isn't another use later in the MBB.
2134 RII->second.back().canFold = false;
2136 // Only need a reload if there isn't an earlier def / use.
2137 if (RII == RestoreIdxes.end()) {
2138 std::vector<SRInfo> Infos;
2139 Infos.push_back(SRInfo(index, NewVReg, true));
2140 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2142 RII->second.push_back(SRInfo(index, NewVReg, true));
2144 RestoreMBBs.set(MBBId);
2148 // Update spill weight.
2149 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2150 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2153 if (NewVReg && TrySplit && AllCanFold) {
2154 // If all of its def / use can be folded, give it a low spill weight.
2155 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return whether the restore recorded for vreg 'vr' at
// slot 'index' in block 'Id' is still marked foldable, i.e. the caller may
// fold the reload together with another memory operand.
// NOTE(review): interior lines are missing from this view (the early-return
// and the final return paths are not visible); comments describe only the
// visible logic.
2160 bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
2161 unsigned vr, BitVector &RestoreMBBs,
2162 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restore at all was recorded for this basic block.
2163 if (!RestoreMBBs[Id])
// Linear scan of this block's restore records for an entry matching both the
// instruction index and the virtual register, that is still foldable.
2165 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2166 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2167 if (Restores[i].index == index &&
2168 Restores[i].vreg == vr &&
2169 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded at 'index' in block 'Id'
// by resetting its index to the default-constructed (invalid) LiveIndex, so a
// later pass over RestoreIdxes will skip it. Used after a restore has been
// folded and is no longer needed.
// NOTE(review): some interior lines are missing from this view (e.g. the
// early return when RestoreMBBs[Id] is clear, and the vreg comparison on the
// condition appears truncated).
2174 void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
2175 unsigned vr, BitVector &RestoreMBBs,
2176 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// Nothing recorded for this block.
2177 if (!RestoreMBBs[Id])
2179 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2180 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// Matching entry: mark it invalid rather than erasing, so indices of the
// other entries stay stable.
2181 if (Restores[i].index == index && Restores[i].vreg)
2182 Restores[i].index = LiveIndex();
2185 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2186 /// spilled and create empty intervals for their uses.
// Walks every remaining operand that still references li.reg after spilling:
//  - a def must be an IMPLICIT_DEF (anything else should have been rewritten),
//    and is deleted outright;
//  - a use of the implicit_def gets a brand-new virtual register with an
//    empty live interval, so the register allocator can assign it freely.
// NOTE(review): interior lines are missing from this view (the def/use branch
// and the operand-rewrite tail of the inner loop are not fully visible).
2188 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2189 const TargetRegisterClass* rc,
2190 std::vector<LiveInterval*> &NewLIs) {
// Iterate all def/use operands of li.reg. The iterator is advanced before
// any instruction is erased (advance happens in unseen lines), so erasure
// does not invalidate the traversal.
2191 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2192 re = mri_->reg_end(); ri != re; ) {
2193 MachineOperand &O = ri.getOperand();
2194 MachineInstr *MI = &*ri;
// Any def left behind must be an IMPLICIT_DEF; delete it and drop it from
// both the slot-index maps and the VirtRegMap.
2197 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2198 "Register def was not rewritten?");
2199 RemoveMachineInstrFromMaps(MI);
2200 vrm.RemoveMachineInstrFromMaps(MI);
2201 MI->eraseFromParent();
2203 // This must be an use of an implicit_def so it's not part of the live
2204 // interval. Create a new empty live interval for it.
2205 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2206 unsigned NewVReg = mri_->createVirtualRegister(rc);
// Record that the new vreg's value is implicitly defined, so the allocator
// need not emit a reload for it.
2208 vrm.setIsImplicitlyDefined(NewVReg);
2209 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that still names li.reg.
2210 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2211 MachineOperand &MO = MI->getOperand(i);
2212 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simple spiller: assign one stack slot to the
// interval and, for every instruction touching li.reg, either fold the memory
// access into the instruction or create a tiny new interval (one vreg per
// instruction) covering just the reload/spill around it. Returns the list of
// newly created intervals.
// NOTE(review): interior lines are missing from this view (use/def branch
// structure, debug output bodies, iterator re-advancement, and the final
// return are not fully visible).
2221 std::vector<LiveInterval*> LiveIntervals::
2222 addIntervalsForSpillsFast(const LiveInterval &li,
2223 const MachineLoopInfo *loopInfo,
// One stack slot serves the whole original interval.
2225 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2227 std::vector<LiveInterval*> added;
// Guard against spilling an interval that was already spilled (its weight
// would have been set to HUGE_VALF below).
2229 assert(li.weight != HUGE_VALF &&
2230 "attempt to spill already spilled interval!");
2233 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2238 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Walk all instructions referencing li.reg. The iterator is restarted from
// reg_begin after each rewrite (see bottom of loop) because operand rewriting
// mutates the use/def chains.
2240 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2241 while (RI != mri_->reg_end()) {
2242 MachineInstr* MI = &*RI;
// Collect every operand index in MI that names li.reg, noting whether the
// instruction reads and/or writes the register.
2244 SmallVector<unsigned, 2> Indices;
2245 bool HasUse = false;
2246 bool HasDef = false;
2248 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2249 MachineOperand& mop = MI->getOperand(i);
2250 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2252 HasUse |= MI->getOperand(i).isUse();
2253 HasDef |= MI->getOperand(i).isDef();
2255 Indices.push_back(i);
// First choice: fold the stack slot directly into MI. If that fails, fall
// back to a reload/spill around the instruction with a fresh vreg.
2258 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2259 Indices, true, slot, li.reg)) {
2260 unsigned NewVReg = mri_->createVirtualRegister(rc);
// The new vreg shares the original interval's stack slot.
2262 vrm.assignVirt2StackSlot(NewVReg, slot);
2264 // create a new register for this spill
2265 LiveInterval &nI = getOrCreateInterval(NewVReg);
2267 // the spill weight is now infinity as it
2268 // cannot be spilled again
2269 nI.weight = HUGE_VALF;
2271 // Rewrite register operands to use the new vreg.
2272 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2273 E = Indices.end(); I != E; ++I) {
2274 MI->getOperand(*I).setReg(NewVReg);
// A use of the new single-instruction vreg is by construction its last use.
2276 if (MI->getOperand(*I).isUse())
2277 MI->getOperand(*I).setIsKill(true);
2280 // Fill in the new live interval.
2281 LiveIndex index = getInstructionIndex(MI);
// Use case: the vreg is live from the (hypothetical) load just before MI up
// to MI's use slot; a restore point is recorded for the spiller.
2283 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2284 nI.getNextValue(LiveIndex(), 0, false,
2285 getVNInfoAllocator()));
2286 DEBUG(errs() << " +" << LR);
2288 vrm.addRestorePoint(NewVReg, MI);
// Def case: live from MI's def slot to the store slot just after it; a spill
// point is recorded.
2291 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2292 nI.getNextValue(LiveIndex(), 0, false,
2293 getVNInfoAllocator()));
2294 DEBUG(errs() << " +" << LR);
2296 vrm.addSpillPoint(NewVReg, true, MI);
2299 added.push_back(&nI);
2302 errs() << "\t\t\t\tadded new interval: ";
// Restart the walk: rewriting operands above changed li.reg's use/def chain.
2309 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - The main spilling driver. For the given interval:
//  1. decide per-val# whether its def is rematerializable (and clonable);
//  2. assign a stack slot if any value must live in memory;
//  3. rewrite all defs/uses via rewriteInstructionsForSpills, collecting
//     per-MBB spill and restore candidates;
//  4. try to fold the recorded spills/stores and restores/loads into the
//     surrounding instructions, otherwise tell the VirtRegMap spiller to
//     emit them;
//  5. finalize the new intervals (kill flags, spill weights) and return them.
// 'SpillIs' lists intervals being spilled concurrently (affects remat
// legality); 'loopInfo' drives spill-weight computation.
// NOTE(review): many interior lines are missing from this view (loop
// headers, else-branches, several closing braces and the final return), so
// comments below annotate only the visible statements.
2315 std::vector<LiveInterval*> LiveIntervals::
2316 addIntervalsForSpills(const LiveInterval &li,
2317 SmallVectorImpl<LiveInterval*> &SpillIs,
2318 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Optional fast path: one-vreg-per-instruction spilling, no splitting.
2320 if (EnableFastSpilling)
2321 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2323 assert(li.weight != HUGE_VALF &&
2324 "attempt to spill already spilled interval!");
2327 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2328 li.print(errs(), tri_);
2332 // Each bit specify whether a spill is required in the MBB.
// SpillIdxes/RestoreIdxes map MBB number -> ordered spill/restore records;
// the bit vectors are a fast "does this MBB have any" index over the maps.
2333 BitVector SpillMBBs(mf_->getNumBlockIDs());
2334 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2335 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2336 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
// MBB number -> the new vreg currently live in that block (for splitting).
2337 DenseMap<unsigned,unsigned> MBBVRegsMap;
2338 std::vector<LiveInterval*> NewLIs;
2339 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
2341 unsigned NumValNums = li.getNumValNums();
2342 SmallVector<MachineInstr*, 4> ReMatDefs;
2343 ReMatDefs.resize(NumValNums, NULL);
2344 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2345 ReMatOrigDefs.resize(NumValNums, NULL);
2346 SmallVector<int, 4> ReMatIds;
2347 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2348 BitVector ReMatDelete(NumValNums);
// MAX_STACK_SLOT acts as the "no slot assigned" sentinel.
2349 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2351 // Spilling a split live interval. It cannot be split any further. Also,
2352 // it's also guaranteed to be a single val# / range interval.
2353 if (vrm.getPreSplitReg(li.reg)) {
2354 vrm.setIsSplitFromReg(li.reg, 0);
2355 // Unset the split kill marker on the last use.
2356 LiveIndex KillIdx = vrm.getKillPoint(li.reg);
2357 if (KillIdx != LiveIndex()) {
2358 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2359 assert(KillMI && "Last use disappeared?");
2360 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2361 assert(KillOp != -1 && "Last use disappeared?");
2362 KillMI->getOperand(KillOp).setIsKill(false);
2364 vrm.removeKillPoint(li.reg);
// A pre-split interval reuses its already-assigned slot / remat info.
2365 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2366 Slot = vrm.getStackSlot(li.reg);
2367 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2368 MachineInstr *ReMatDefMI = DefIsReMat ?
2369 vrm.getReMaterializedMI(li.reg) : NULL;
2371 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2372 bool isLoad = isLoadSS ||
2373 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2374 bool IsFirstRange = true;
2375 for (LiveInterval::Ranges::const_iterator
2376 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2377 // If this is a split live interval with multiple ranges, it means there
2378 // are two-address instructions that re-defined the value. Only the
2379 // first def can be rematerialized!
2381 // Note ReMatOrigDefMI has already been deleted.
// First range: rewrite with remat information available.
2382 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2383 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2384 false, vrm, rc, ReMatIds, loopInfo,
2385 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2386 MBBVRegsMap, NewLIs);
// Subsequent ranges: plain stack-slot rewriting, no remat.
2388 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2389 Slot, 0, false, false, false,
2390 false, vrm, rc, ReMatIds, loopInfo,
2391 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2392 MBBVRegsMap, NewLIs);
2394 IsFirstRange = false;
// Clean up IMPLICIT_DEFs of the pre-split interval and bail out early
// (pre-split intervals skip the full pipeline below).
2397 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Only attempt live-range splitting when the interval spans multiple blocks.
2401 bool TrySplit = !intervalIsInOneMBB(li);
// Pass 1 over val#s: determine rematerializability and whether a stack slot
// is required at all.
2404 bool NeedStackSlot = false;
2405 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2407 const VNInfo *VNI = *i;
2408 unsigned VN = VNI->id;
2409 if (VNI->isUnused())
2410 continue; // Dead val#.
2411 // Is the def for the val# rematerializable?
2412 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2413 ? getInstructionFromIndex(VNI->def) : 0;
2415 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2416 // Remember how to remat the def of this val#.
2417 ReMatOrigDefs[VN] = ReMatDefMI;
2418 // Original def may be modified so we have to make a copy here.
2419 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2420 CloneMIs.push_back(Clone);
2421 ReMatDefs[VN] = Clone;
2423 bool CanDelete = true;
2424 if (VNI->hasPHIKill()) {
2425 // A kill is a phi node, not all of its uses can be rematerialized.
2426 // It must not be deleted.
2428 // Need a stack slot if there is any live range where uses cannot be
2430 NeedStackSlot = true;
// Fully rematerializable val#: the original def can be deleted.
2433 ReMatDelete.set(VN);
2435 // Need a stack slot if there is any live range where uses cannot be
2437 NeedStackSlot = true;
2441 // One stack slot per live interval.
2442 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2443 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2444 Slot = vrm.assignVirt2StackSlot(li.reg);
2446 // This case only occurs when the prealloc splitter has already assigned
2447 // a stack slot to this vreg.
2449 Slot = vrm.getStackSlot(li.reg);
2452 // Create new intervals and rewrite defs and uses.
// Pass 2: per live range, rewrite instructions using the remat decisions
// recorded above; this populates SpillIdxes/RestoreIdxes and NewLIs.
2453 for (LiveInterval::Ranges::const_iterator
2454 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2455 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2456 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2457 bool DefIsReMat = ReMatDefMI != NULL;
2458 bool CanDelete = ReMatDelete[I->valno->id];
2460 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2461 bool isLoad = isLoadSS ||
2462 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2463 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2464 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2465 CanDelete, vrm, rc, ReMatIds, loopInfo,
2466 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2467 MBBVRegsMap, NewLIs);
2470 // Insert spills / restores if we are splitting.
// Not splitting: nothing was recorded in SpillIdxes/RestoreIdxes; just clean
// up implicit defs and return.
2472 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Pass 3a: walk every MBB with a recorded spill and either fold the store or
// hand the spill point to the VirtRegMap. AddedKill tracks intervals whose
// last use is a spill store (kill flags handled here, not in finalization).
2476 SmallPtrSet<LiveInterval*, 4> AddedKill;
2477 SmallVector<unsigned, 2> Ops;
2478 if (NeedStackSlot) {
2479 int Id = SpillMBBs.find_first();
2481 std::vector<SRInfo> &spills = SpillIdxes[Id];
2482 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2483 LiveIndex index = spills[i].index;
2484 unsigned VReg = spills[i].vreg;
2485 LiveInterval &nI = getOrCreateInterval(VReg);
2486 bool isReMat = vrm.isReMaterialized(VReg);
2487 MachineInstr *MI = getInstructionFromIndex(index);
2488 bool CanFold = false;
2489 bool FoundUse = false;
2491 if (spills[i].canFold) {
// Collect MI's operand indices that reference VReg to attempt folding.
2493 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2494 MachineOperand &MO = MI->getOperand(j);
2495 if (!MO.isReg() || MO.getReg() != VReg)
2502 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2503 RestoreMBBs, RestoreIdxes))) {
2504 // MI has two-address uses of the same register. If the use
2505 // isn't the first and only use in the BB, then we can't fold
2506 // it. FIXME: Move this to rewriteInstructionsForSpills.
2513 // Fold the store into the def if possible.
2514 bool Folded = false;
2515 if (CanFold && !Ops.empty()) {
2516 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2519 // Also folded uses, do not issue a load.
2520 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
// Remove the (now unneeded) ranges covering the folded access.
2521 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2523 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2527 // Otherwise tell the spiller to issue a spill.
// The spill kills the vreg iff its interval ends exactly at the store slot.
2529 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2530 bool isKill = LR->end == getStoreIndex(index);
2531 if (!MI->registerDefIsDead(nI.reg))
2532 // No need to spill a dead def.
2533 vrm.addSpillPoint(VReg, isKill, MI);
2535 AddedKill.insert(&nI);
2538 Id = SpillMBBs.find_next(Id);
// Pass 3b: walk every MBB with a recorded restore and either fold the load
// (or the remat'ed def when it is itself a load) or record a restore point.
2542 int Id = RestoreMBBs.find_first();
2544 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2545 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2546 LiveIndex index = restores[i].index;
// Entries invalidated by eraseRestoreInfo carry the default LiveIndex.
2547 if (index == LiveIndex())
2549 unsigned VReg = restores[i].vreg;
2550 LiveInterval &nI = getOrCreateInterval(VReg);
2551 bool isReMat = vrm.isReMaterialized(VReg);
2552 MachineInstr *MI = getInstructionFromIndex(index);
2553 bool CanFold = false;
2555 if (restores[i].canFold) {
2557 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2558 MachineOperand &MO = MI->getOperand(j);
2559 if (!MO.isReg() || MO.getReg() != VReg)
2563 // If this restore were to be folded, it would have been folded
2572 // Fold the load into the use if possible.
2573 bool Folded = false;
2574 if (CanFold && !Ops.empty()) {
// Non-remat case: try folding the stack-slot load directly.
2576 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Remat case: if the remat'ed def is itself a load, try folding that.
2578 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2580 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2581 // If the rematerializable def is a load, also try to fold it.
2582 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2583 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2584 Ops, isLoadSS, LdSlot, VReg);
2586 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2588 // Re-matting an instruction with virtual register use. Add the
2589 // register as an implicit use on the use MI and update the register
2590 // interval's spill weight to HUGE_VALF to prevent it from being
// ... spilled (comment continues on a line not visible here).
2592 LiveInterval &ImpLi = getInterval(ImpUse);
2593 ImpLi.weight = HUGE_VALF;
2594 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2599 // If folding is not possible / failed, then tell the spiller to issue a
2600 // load / rematerialization for us.
// Folded: the range covering the reload is no longer needed.
2602 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2604 vrm.addRestorePoint(VReg, MI);
2606 Id = RestoreMBBs.find_next(Id);
2609 // Finalize intervals: add kills, finalize spill weights, and filter out
// ... dead intervals (tail of comment not visible here).
2611 std::vector<LiveInterval*> RetNewLIs;
2612 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2613 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's instruction count.
2615 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
// Intervals whose last use was already handled as a spill kill are in
// AddedKill; for the rest, set the kill flag on the true last use.
2616 if (!AddedKill.count(LI)) {
2617 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2618 LiveIndex LastUseIdx = getBaseIndex(LR->end);
2619 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2620 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2621 assert(UseIdx != -1);
// Two-address tied uses are redefined by the same instruction, so the value
// is not actually killed there.
2622 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2623 LastUse->getOperand(UseIdx).setIsKill();
2624 vrm.addKillPoint(LI->reg, LastUseIdx);
2627 RetNewLIs.push_back(LI);
// Final cleanup of IMPLICIT_DEF leftovers before returning the new set.
2631 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2635 /// hasAllocatableSuperReg - Return true if the specified physical register has
2636 /// any super register that's allocatable.
// NOTE(review): the return statements of this function are not visible here;
// the visible loop scans all super-registers of Reg for one that is both
// allocatable and has a live interval computed.
2637 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2638 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2639 if (allocatableRegs_[*AS] && hasInterval(*AS))
2644 /// getRepresentativeReg - Find the largest super register of the specified
2645 /// physical register.
// Walks the super-register chain looking for a register with an interval
// whose own super-registers are no longer allocatable (i.e. the top of the
// allocatable hierarchy). Falls back to Reg itself.
// NOTE(review): the loop body that updates BestReg and the final return are
// not visible in this view.
2646 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2647 // Find the largest super-register that is allocatable.
2648 unsigned BestReg = Reg;
2649 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2650 unsigned SuperReg = *AS;
2651 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2659 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2660 /// specified interval that conflicts with the specified physical register.
// A "conflict" is an instruction referencing li.reg whose slot index lies
// inside the live interval of PhysReg's representative super-register.
// NOTE(review): the increment of NumConflicts inside the if-body is not
// visible in this view.
2661 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2662 unsigned PhysReg) const {
2663 unsigned NumConflicts = 0;
// Test liveness against the largest allocatable super-register's interval so
// aliasing sub/super registers are accounted for.
2664 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2665 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2666 E = mri_->reg_end(); I != E; ++I) {
2667 MachineOperand &O = I.getOperand();
2668 MachineInstr *MI = O.getParent();
2669 LiveIndex Index = getInstructionIndex(MI);
2670 if (pli.liveAt(Index))
2673 return NumConflicts;
2676 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2677 /// around all defs and uses of the specified interval. Return true if it
2678 /// was able to cut its interval.
// Used as an emergency measure when the allocator runs out of registers:
// PhysReg's representative super-register is spilled around each instruction
// that touches li.reg, and its live interval (plus its sub-registers'
// intervals) is trimmed around those points.
// NOTE(review): several interior lines are missing from this view (the Cut
// flag, loop continuations, and the final return are not visible).
2679 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2680 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the largest allocatable super-register so all aliases are covered.
2681 unsigned SpillReg = getRepresentativeReg(PhysReg);
2683 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2684 // If there are registers which alias PhysReg, but which are not a
2685 // sub-register of the chosen representative super register. Assert
2686 // since we can't handle it yet.
2687 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2688 tri_->isSuperRegister(*AS, SpillReg));
2691 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs guards against processing the same instruction twice when it has
// multiple operands referencing li.reg.
2692 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2693 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2694 E = mri_->reg_end(); I != E; ++I) {
2695 MachineOperand &O = I.getOperand();
2696 MachineInstr *MI = O.getParent();
2697 if (SeenMIs.count(MI))
2700 LiveIndex Index = getInstructionIndex(MI);
2701 if (pli.liveAt(Index)) {
// Record an emergency spill of SpillReg around MI, then carve the window
// [load-slot, just-past-store-slot] out of its interval.
2702 vrm.addEmergencySpill(SpillReg, MI);
2703 LiveIndex StartIdx = getLoadIndex(Index);
2704 LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
2705 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2706 pli.removeRange(StartIdx, EndIdx);
// The window straddles a live-range boundary: cannot cut cleanly, so report
// a fatal out-of-registers condition (with extra help for inline asm, the
// common culprit).
2710 raw_string_ostream Msg(msg);
2711 Msg << "Ran out of registers during register allocation!";
2712 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2713 Msg << "\nPlease check your inline asm statement for invalid "
2714 << "constraints:\n";
2715 MI->print(Msg, tm_);
2717 llvm_report_error(Msg.str());
// Also trim the intervals of SpillReg's sub-registers around this point.
2719 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2720 if (!hasInterval(*AS))
2722 LiveInterval &spli = getInterval(*AS);
2723 if (spli.liveAt(Index))
2724 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2731 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2732 MachineInstr* startInst) {
2733 LiveInterval& Interval = getOrCreateInterval(reg);
2734 VNInfo* VN = Interval.getNextValue(
2735 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2736 startInst, true, getVNInfoAllocator());
2737 VN->setHasPHIKill(true);
2738 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2740 LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2741 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2742 Interval.addRange(LR);