//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass, which is used
// by the linear scan register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);

static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
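///
/// A minimal sketch of the rewrite (hypothetical vregs and opcodes, in the
/// style of the machine-IR dumps used elsewhere in this file):
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024        ; a copy whose source is implicit_def
/// becomes
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = IMPLICIT_DEF        ; the copy is itself turned into an
///                                       ; implicit_def; remaining uses of
///                                       ; %reg1024 are marked isUndef.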
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's we want to keep are those that are live
      // out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
           UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MI->getParent())
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        // Otherwise, rewrite the use with a fresh vreg marked undef.
        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
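  //
  // For reference, every instruction is assigned InstrSlots::NUM consecutive
  // indices, one per slot. A minimal sketch of the arithmetic behind the
  // index helpers (assuming the LOAD=0, USE=1, DEF=2, STORE=3, NUM=4 layout
  // declared in LiveIntervalAnalysis.h):
  //   base = idx - idx % InstrSlots::NUM;   // getBaseIndex(idx)
  //   use  = base + InstrSlots::USE;        // getUseIndex: reads happen here
  //   def  = base + InstrSlots::DEF;        // getDefIndex: writes happen here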
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
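      // E.g. (hypothetical indices, with NUM == 4): an instruction numbered
      // 8 with two defs occupies [8,12) itself and reserves the empty range
      // [12,20), so the next instruction is numbered 20.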
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                   std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                      (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else if (vni->kills[i].isPHIKill) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
            --I;

            vni->kills[i].killIdx = terminatorGaps[I->second];
          } else {
            assert(OldI2MI[index] != 0 &&
                   "Kill refers to instruction not present in index maps.");
            if (OldI2MI[index]) {
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            } else {
              // Defensive fallback if the killing instruction was erased:
              // map to the next surviving instruction, as for range ends.
              unsigned idx = index;
              while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

              if (index != OldI2MI.size())
                vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] +
                                        (idx == index ? offset : 0);
              else
                vni->kills[i].killIdx = InstrSlots::NUM * i2miMap_.size();
            }
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings
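  //
  // A sketch of the per-index rescaling this depends on (assuming
  // InstrSlots::scale multiplies only the instruction number and keeps the
  // intra-instruction slot offset; see the helper declared in the header):
  //   scaled = (idx / InstrSlots::NUM) * factor * InstrSlots::NUM
  //            + idx % InstrSlots::NUM;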

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
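  //
  // For example (hypothetical vregs), after two-address rewriting an
  // instruction may both read and write the same vreg:
  //   8  %reg1024<def> = ADD %reg1024, %reg1025
  // and after phi elimination a vreg is assigned once per predecessor, so a
  // second def of %reg1024 is expected rather than malformed input.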
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    VNInfo *ValNo =
      interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
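    //
    // A small illustration of the two-address case (hypothetical vregs):
    //   %reg1024<def> = ADD %reg1024<kill>, %reg1026
    // The tied def-and-use means the interval built for the earlier value
    // must now be split in two at this redefinition.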
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
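  //
  // Numerically (a hypothetical example assuming the NUM=4 slot layout): a
  // def at base index 8 whose kill sits at base index 12 produces
  // [defSlot(8), useSlot(12)+1) = [10, 14).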
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // defining instruction where we know it's dead is if it is live-in to the
  // function and never used. Another possible case is that the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute live intervals for all the virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which a variable is live.
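///
/// For example (hypothetical indices under this numbering): a vreg defined
/// at index 10 whose last use is at index 33 gets the live interval
/// [10, 34); after splitting or spilling, an interval may instead consist
/// of several such ranges, one per contiguous live region.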
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
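///
/// For example, for an extract (hypothetical vregs):
///   %reg1031<def> = EXTRACT_SUBREG %reg1029<kill>, 2
/// the source is %reg1029, or, when the input is a physical register, the
/// sub-register of it selected by subreg index 2.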
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
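///
/// Intuitively (an illustrative example, not tied to any one target): a
/// constant materialization such as "%reg1024 = MOV32ri 42" can simply be
/// re-executed at each reload point, whereas a load from memory that might
/// be stored to between def and use cannot.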
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For the use, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
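///
/// For instance (illustrative x86-flavored pseudocode): the reload
///   %reg1024 = MOV32rm <fi#2>
///   ...      = ADD32rr ..., %reg1024
/// may be folded into
///   ...      = ADD32rm ..., <fi#2>
/// removing the need for a separate reload instruction.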
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
1820 bool HasDef = false;
1821 bool HasUse = false;
1822 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1823 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1824 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1825 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1826 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1827 if (!HasDef && !HasUse)
1828 continue;
1830 AllCanFold &= CanFold;
1832 // Update weight of spill interval.
1833 LiveInterval &nI = getOrCreateInterval(NewVReg);
1834 if (!TrySplit) {
1835 // The spill weight is now infinity as it cannot be spilled again.
1836 nI.weight = HUGE_VALF;
1837 continue;
1838 }
1840 // Keep track of the last def and first use in each MBB.
1841 if (HasDef) {
1842 if (MI != ReMatOrigDefMI || !CanDelete) {
1843 bool HasKill = false;
1844 if (!HasUse)
1845 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1846 else {
1847 // If this is a two-address instruction, then this index starts a new VNInfo.
1848 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1849 if (VNI)
1850 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1851 }
1852 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1853 SpillIdxes.find(MBBId);
1854 if (!HasKill) {
1855 if (SII == SpillIdxes.end()) {
1856 std::vector<SRInfo> S;
1857 S.push_back(SRInfo(index, NewVReg, true));
1858 SpillIdxes.insert(std::make_pair(MBBId, S));
1859 } else if (SII->second.back().vreg != NewVReg) {
1860 SII->second.push_back(SRInfo(index, NewVReg, true));
1861 } else if ((int)index > SII->second.back().index) {
1862 // If there is an earlier def and this is a two-address
1863 // instruction, then it's not possible to fold the store (which
1864 // would also fold the load).
1865 SRInfo &Info = SII->second.back();
1866 Info.index = index;
1867 Info.canFold = !HasUse;
1868 }
1869 SpillMBBs.set(MBBId);
1870 } else if (SII != SpillIdxes.end() &&
1871 SII->second.back().vreg == NewVReg &&
1872 (int)index > SII->second.back().index) {
1873 // There is an earlier def that's not killed (must be two-address).
1874 // The spill is no longer needed.
1875 SII->second.pop_back();
1876 if (SII->second.empty()) {
1877 SpillIdxes.erase(MBBId);
1878 SpillMBBs.reset(MBBId);
1879 }
1880 }
1881 }
1882 }
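// At this point SpillIdxes[MBBId] records, per basic block, the candidate
// spill (index, vreg, canFold) for the last def of each new vreg, and
// SpillMBBs marks which blocks need any spill at all; both are consumed
// by the spill-insertion loop in addIntervalsForSpills below.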
1884 if (HasUse) {
1885 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1886 SpillIdxes.find(MBBId);
1887 if (SII != SpillIdxes.end() &&
1888 SII->second.back().vreg == NewVReg &&
1889 (int)index > SII->second.back().index)
1890 // Use(s) follow the last def; it's not safe to fold the spill.
1891 SII->second.back().canFold = false;
1892 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1893 RestoreIdxes.find(MBBId);
1894 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1895 // If we are splitting live intervals, only fold if it's the first
1896 // use and there isn't another use later in the MBB.
1897 RII->second.back().canFold = false;
1898 else {
1899 // Only need a reload if there isn't an earlier def / use.
1900 if (RII == RestoreIdxes.end()) {
1901 std::vector<SRInfo> Infos;
1902 Infos.push_back(SRInfo(index, NewVReg, true));
1903 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1904 } else
1905 RII->second.push_back(SRInfo(index, NewVReg, true));
1907 RestoreMBBs.set(MBBId);
1908 }
1909 }
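// Symmetrically to SpillIdxes above, RestoreIdxes / RestoreMBBs record the
// first use of each new vreg in a block, so that a single reload can be
// issued (or folded into the user) ahead of it.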
1911 // Update spill weight.
1912 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1913 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1914 }
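// The weight added in the loop above grows with loop depth; in this
// revision getSpillWeight appears to compute roughly
// (HasDef + HasUse) * 10^loopDepth, so e.g. a def-and-use at loop depth 2
// contributes about 200 to the interval's weight.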
1916 if (NewVReg && TrySplit && AllCanFold) {
1917 // If all of its def / use can be folded, give it a low spill weight.
1918 LiveInterval &nI = getOrCreateInterval(NewVReg);
1919 nI.weight /= 10.0F;
1920 }
1921 }
1923 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1924 BitVector &RestoreMBBs,
1925 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1926 if (!RestoreMBBs[Id])
1927 return false;
1928 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1929 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1930 if (Restores[i].index == index &&
1931 Restores[i].vreg == vr &&
1932 Restores[i].canFold)
1933 return true;
1934 return false;
1935 }
1937 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1938 BitVector &RestoreMBBs,
1939 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1940 if (!RestoreMBBs[Id])
1941 return;
1942 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1943 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1944 if (Restores[i].index == index && Restores[i].vreg == vr)
1945 Restores[i].index = -1;
1946 }
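// Note: eraseRestoreInfo only marks an entry dead by setting its index to
// -1; the reload-insertion loop in addIntervalsForSpills skips entries
// whose index is -1 instead of erasing them from the vector.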
1948 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1949 /// spilled and create empty intervals for their uses.
1950 void
1951 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1952 const TargetRegisterClass* rc,
1953 std::vector<LiveInterval*> &NewLIs) {
1954 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1955 re = mri_->reg_end(); ri != re; ) {
1956 MachineOperand &O = ri.getOperand();
1957 MachineInstr *MI = &*ri;
1958 ++ri;
1959 if (O.isDef()) {
1960 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1961 "Register def was not rewritten?");
1962 RemoveMachineInstrFromMaps(MI);
1963 vrm.RemoveMachineInstrFromMaps(MI);
1964 MI->eraseFromParent();
1965 } else {
1966 // This must be a use of an implicit_def so it's not part of the live
1967 // interval. Create a new empty live interval for it.
1968 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
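// E.g. (operands hypothetical) a surviving use such as
//   %reg1026<def> = XOR %reg1024<undef>, %reg1026
// keeps its shape, but the %reg1024 operand is redirected below to a
// fresh vreg whose interval is empty, so the allocator may hand it any
// register without inserting a reload.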
1969 unsigned NewVReg = mri_->createVirtualRegister(rc);
1970 vrm.grow();
1971 vrm.setIsImplicitlyDefined(NewVReg);
1972 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1973 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1974 MachineOperand &MO = MI->getOperand(i);
1975 if (MO.isReg() && MO.getReg() == li.reg) {
1976 MO.setReg(NewVReg);
1977 MO.setIsUndef();
1978 }
1979 }
1980 }
1981 }
1982 }
1984 std::vector<LiveInterval*> LiveIntervals::
1985 addIntervalsForSpillsFast(const LiveInterval &li,
1986 const MachineLoopInfo *loopInfo,
1987 VirtRegMap &vrm) {
1988 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1990 std::vector<LiveInterval*> added;
1992 assert(li.weight != HUGE_VALF &&
1993 "attempt to spill already spilled interval!");
1995 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1999 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2001 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2002 while (RI != mri_->reg_end()) {
2003 MachineInstr* MI = &*RI;
2005 SmallVector<unsigned, 2> Indices;
2006 bool HasUse = false;
2007 bool HasDef = false;
2009 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2010 MachineOperand& mop = MI->getOperand(i);
2011 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2013 HasUse |= MI->getOperand(i).isUse();
2014 HasDef |= MI->getOperand(i).isDef();
2016 Indices.push_back(i);
2017 }
2019 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2020 Indices, true, slot, li.reg)) {
2021 unsigned NewVReg = mri_->createVirtualRegister(rc);
2022 vrm.grow();
2023 vrm.assignVirt2StackSlot(NewVReg, slot);
2025 // create a new register for this spill
2026 LiveInterval &nI = getOrCreateInterval(NewVReg);
2028 // the spill weight is now infinity as it
2029 // cannot be spilled again
2030 nI.weight = HUGE_VALF;
2032 // Rewrite register operands to use the new vreg.
2033 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2034 E = Indices.end(); I != E; ++I) {
2035 MI->getOperand(*I).setReg(NewVReg);
2037 if (MI->getOperand(*I).isUse())
2038 MI->getOperand(*I).setIsKill(true);
2039 }
2041 // Fill in the new live interval.
2042 unsigned index = getInstructionIndex(MI);
2043 if (HasUse) {
2044 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2045 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2046 nI.addRange(LR);
2048 vrm.addRestorePoint(NewVReg, MI);
2049 }
2050 if (HasDef) {
2051 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2052 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2053 nI.addRange(LR);
2055 vrm.addSpillPoint(NewVReg, true, MI);
2056 }
2058 added.push_back(&nI);
2060 DOUT << "\t\t\t\tadded new interval: ";
2066 RI = mri_->reg_begin(li.reg);
2067 }
2069 return added;
2070 }
2072 std::vector<LiveInterval*> LiveIntervals::
2073 addIntervalsForSpills(const LiveInterval &li,
2074 SmallVectorImpl<LiveInterval*> &SpillIs,
2075 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2077 if (EnableFastSpilling)
2078 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2080 assert(li.weight != HUGE_VALF &&
2081 "attempt to spill already spilled interval!");
2083 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2084 li.print(DOUT, tri_);
2085 DOUT << '\n';
2087 // Each bit specifies whether a spill is required in the MBB.
2088 BitVector SpillMBBs(mf_->getNumBlockIDs());
2089 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2090 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2091 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2092 DenseMap<unsigned,unsigned> MBBVRegsMap;
2093 std::vector<LiveInterval*> NewLIs;
2094 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2096 unsigned NumValNums = li.getNumValNums();
2097 SmallVector<MachineInstr*, 4> ReMatDefs;
2098 ReMatDefs.resize(NumValNums, NULL);
2099 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2100 ReMatOrigDefs.resize(NumValNums, NULL);
2101 SmallVector<int, 4> ReMatIds;
2102 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2103 BitVector ReMatDelete(NumValNums);
2104 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2106 // Spilling a split live interval. It cannot be split any further. Also,
2107 // it's guaranteed to be a single val# / range interval.
2108 if (vrm.getPreSplitReg(li.reg)) {
2109 vrm.setIsSplitFromReg(li.reg, 0);
2110 // Unset the split kill marker on the last use.
2111 unsigned KillIdx = vrm.getKillPoint(li.reg);
2112 if (KillIdx) {
2113 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2114 assert(KillMI && "Last use disappeared?");
2115 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2116 assert(KillOp != -1 && "Last use disappeared?");
2117 KillMI->getOperand(KillOp).setIsKill(false);
2118 }
2119 vrm.removeKillPoint(li.reg);
2120 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2121 Slot = vrm.getStackSlot(li.reg);
2122 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2123 MachineInstr *ReMatDefMI = DefIsReMat ?
2124 vrm.getReMaterializedMI(li.reg) : NULL;
2125 int LdSlot = 0;
2126 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2127 bool isLoad = isLoadSS ||
2128 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2129 bool IsFirstRange = true;
2130 for (LiveInterval::Ranges::const_iterator
2131 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2132 // If this is a split live interval with multiple ranges, it means there
2133 // are two-address instructions that re-defined the value. Only the
2134 // first def can be rematerialized!
2135 if (IsFirstRange) {
2136 // Note ReMatOrigDefMI has already been deleted.
2137 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2138 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2139 false, vrm, rc, ReMatIds, loopInfo,
2140 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2141 MBBVRegsMap, NewLIs);
2142 } else {
2143 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2144 Slot, 0, false, false, false,
2145 false, vrm, rc, ReMatIds, loopInfo,
2146 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2147 MBBVRegsMap, NewLIs);
2148 }
2149 IsFirstRange = false;
2150 }
2152 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2153 return NewLIs;
2154 }
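// The pre-split path above rewrites the interval range by range: only the
// first range may use the rematerialized def, and the early return skips
// the splitting analysis below, since a split interval cannot be split
// again.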
2156 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2157 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2158 TrySplit = false;
2159 if (TrySplit)
2160 ++numSplits;
2161 bool NeedStackSlot = false;
2162 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2163 i != e; ++i) {
2164 const VNInfo *VNI = *i;
2165 unsigned VN = VNI->id;
2166 if (VNI->isUnused())
2167 continue; // Dead val#.
2168 // Is the def for the val# rematerializable?
2169 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2170 ? getInstructionFromIndex(VNI->def) : 0;
2171 bool dummy;
2172 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2173 // Remember how to remat the def of this val#.
2174 ReMatOrigDefs[VN] = ReMatDefMI;
2175 // Original def may be modified so we have to make a copy here.
2176 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2177 ClonedMIs.push_back(Clone);
2178 ReMatDefs[VN] = Clone;
2180 bool CanDelete = true;
2181 if (VNI->hasPHIKill()) {
2182 // A kill is a phi node; not all of its uses can be rematerialized.
2183 // It must not be deleted.
2184 CanDelete = false;
2185 // Need a stack slot if there is any live range where uses cannot be
2186 // rematerialized.
2187 NeedStackSlot = true;
2188 }
2189 if (CanDelete)
2190 ReMatDelete.set(VN);
2191 } else {
2192 // Need a stack slot if there is any live range where uses cannot be
2193 // rematerialized.
2194 NeedStackSlot = true;
2195 }
2196 }
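// Summary of the scan above: for each value number we remember how to
// rematerialize its def (via a clone, since the original instruction may
// later be modified or deleted) and whether the def itself can be
// deleted; any value that cannot be fully rematerialized forces a stack
// slot for the interval.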
2198 // One stack slot per live interval.
2199 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2200 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2201 Slot = vrm.assignVirt2StackSlot(li.reg);
2203 // This case only occurs when the prealloc splitter has already assigned
2204 // a stack slot to this vreg.
2205 else
2206 Slot = vrm.getStackSlot(li.reg);
2207 }
2209 // Create new intervals and rewrite defs and uses.
2210 for (LiveInterval::Ranges::const_iterator
2211 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2212 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2213 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2214 bool DefIsReMat = ReMatDefMI != NULL;
2215 bool CanDelete = ReMatDelete[I->valno->id];
2216 int LdSlot = 0;
2217 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2218 bool isLoad = isLoadSS ||
2219 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2220 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2221 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2222 CanDelete, vrm, rc, ReMatIds, loopInfo,
2223 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2224 MBBVRegsMap, NewLIs);
2225 }
2227 // Insert spills / restores if we are splitting.
2228 if (!TrySplit) {
2229 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2230 return NewLIs;
2231 }
2233 SmallPtrSet<LiveInterval*, 4> AddedKill;
2234 SmallVector<unsigned, 2> Ops;
2235 if (NeedStackSlot) {
2236 int Id = SpillMBBs.find_first();
2237 while (Id != -1) {
2238 std::vector<SRInfo> &spills = SpillIdxes[Id];
2239 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2240 int index = spills[i].index;
2241 unsigned VReg = spills[i].vreg;
2242 LiveInterval &nI = getOrCreateInterval(VReg);
2243 bool isReMat = vrm.isReMaterialized(VReg);
2244 MachineInstr *MI = getInstructionFromIndex(index);
2245 bool CanFold = false;
2246 bool FoundUse = false;
2247 Ops.clear();
2248 if (spills[i].canFold) {
2249 CanFold = true;
2250 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2251 MachineOperand &MO = MI->getOperand(j);
2252 if (!MO.isReg() || MO.getReg() != VReg)
2253 continue;
2255 Ops.push_back(j);
2256 if (MO.isDef())
2257 continue;
2258 if (MO.isUse() &&
2259 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2260 RestoreMBBs, RestoreIdxes))) {
2261 // MI has two-address uses of the same register. If the use
2262 // isn't the first and only use in the BB, then we can't fold
2263 // it. FIXME: Move this to rewriteInstructionsForSpills.
2264 CanFold = false;
2265 break;
2266 }
2267 FoundUse = true;
2268 }
2269 }
2270 // Fold the store into the def if possible.
2271 bool Folded = false;
2272 if (CanFold && !Ops.empty()) {
2273 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
2274 Folded = true;
2275 if (FoundUse) {
2276 // Also folded uses, do not issue a load.
2277 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2278 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2279 }
2280 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2281 }
2282 }
2284 // Otherwise tell the spiller to issue a spill.
2285 if (!Folded) {
2286 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2287 bool isKill = LR->end == getStoreIndex(index);
2288 if (!MI->registerDefIsDead(nI.reg))
2289 // No need to spill a dead def.
2290 vrm.addSpillPoint(VReg, isKill, MI);
2291 if (isKill)
2292 AddedKill.insert(&nI);
2293 }
2294 }
2295 Id = SpillMBBs.find_next(Id);
2296 }
2297 }
2299 int Id = RestoreMBBs.find_first();
2300 while (Id != -1) {
2301 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2302 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2303 int index = restores[i].index;
2304 if (index == -1)
2305 continue;
2306 unsigned VReg = restores[i].vreg;
2307 LiveInterval &nI = getOrCreateInterval(VReg);
2308 bool isReMat = vrm.isReMaterialized(VReg);
2309 MachineInstr *MI = getInstructionFromIndex(index);
2310 bool CanFold = false;
2311 Ops.clear();
2312 if (restores[i].canFold) {
2313 CanFold = true;
2314 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2315 MachineOperand &MO = MI->getOperand(j);
2316 if (!MO.isReg() || MO.getReg() != VReg)
2317 continue;
2319 if (MO.isDef()) {
2320 // If this restore were to be folded, it would have been folded
2321 // already.
2322 CanFold = false;
2323 break;
2324 }
2325 Ops.push_back(j);
2326 }
2327 }
2329 // Fold the load into the use if possible.
2330 bool Folded = false;
2331 if (CanFold && !Ops.empty()) {
2332 if (!isReMat)
2333 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
2334 else {
2335 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2336 int LdSlot = 0;
2337 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2338 // If the rematerializable def is a load, also try to fold it.
2339 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2340 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2341 Ops, isLoadSS, LdSlot, VReg);
2343 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2344 if (ImpUse) {
2345 // Re-matting an instruction with virtual register use. Add the
2346 // register as an implicit use on the use MI and update the register
2347 // interval's spill weight to HUGE_VALF to prevent it from being
2348 // spilled.
2349 LiveInterval &ImpLi = getInterval(ImpUse);
2350 ImpLi.weight = HUGE_VALF;
2351 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2352 }
2353 }
2354 }
2356 // If folding is not possible / failed, then tell the spiller to issue a
2357 // load / rematerialization for us.
2358 if (Folded)
2359 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2360 else
2361 vrm.addRestorePoint(VReg, MI);
2362 }
2363 Id = RestoreMBBs.find_next(Id);
2364 }
2366 // Finalize intervals: add kills, finalize spill weights, and filter out
2367 // dead intervals.
2368 std::vector<LiveInterval*> RetNewLIs;
2369 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2370 LiveInterval *LI = NewLIs[i];
2371 if (!LI->empty()) {
2372 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2373 if (!AddedKill.count(LI)) {
2374 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2375 unsigned LastUseIdx = getBaseIndex(LR->end);
2376 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2377 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2378 assert(UseIdx != -1);
2379 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2380 LastUse->getOperand(UseIdx).setIsKill();
2381 vrm.addKillPoint(LI->reg, LastUseIdx);
2382 }
2383 }
2384 RetNewLIs.push_back(LI);
2385 }
2386 }
2388 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2389 return RetNewLIs;
2390 }
2392 /// hasAllocatableSuperReg - Return true if the specified physical register has
2393 /// any super register that's allocatable.
2394 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2395 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2396 if (allocatableRegs_[*AS] && hasInterval(*AS))
2397 return true;
2398 return false;
2399 }
2401 /// getRepresentativeReg - Find the largest super register of the specified
2402 /// physical register.
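/// E.g. (target-specific, illustrative): on X86, starting from AL this
/// would walk the super-register chain AL -> AX -> EAX and return the
/// largest super-register that has a live interval but no allocatable
/// super-register of its own.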
2403 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2404 // Find the largest super-register that is allocatable.
2405 unsigned BestReg = Reg;
2406 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2407 unsigned SuperReg = *AS;
2408 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2409 BestReg = SuperReg;
2410 break;
2411 }
2412 }
2413 return BestReg;
2414 }
2416 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2417 /// specified interval that conflicts with the specified physical register.
2418 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2419 unsigned PhysReg) const {
2420 unsigned NumConflicts = 0;
2421 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2422 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2423 E = mri_->reg_end(); I != E; ++I) {
2424 MachineOperand &O = I.getOperand();
2425 MachineInstr *MI = O.getParent();
2426 unsigned Index = getInstructionIndex(MI);
2427 if (pli.liveAt(Index))
2428 ++NumConflicts;
2429 }
2430 return NumConflicts;
2431 }
2433 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2434 /// around all defs and uses of the specified interval. Return true if it
2435 /// was able to cut its interval.
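/// "Emergency" spill here means the physical register is saved and
/// restored only around each conflicting instruction (via
/// vrm.addEmergencySpill), and the corresponding slice is carved out of
/// its live interval and those of its sub-registers, so the allocator can
/// treat the register as free elsewhere.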
2436 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2437 unsigned PhysReg, VirtRegMap &vrm) {
2438 unsigned SpillReg = getRepresentativeReg(PhysReg);
2440 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2441 // There may be registers which alias PhysReg but which are not a
2442 // sub-register of the chosen representative super register. Assert
2443 // since we can't handle them yet.
2444 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2445 tri_->isSuperRegister(*AS, SpillReg));
2447 bool Cut = false;
2448 LiveInterval &pli = getInterval(SpillReg);
2449 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2450 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2451 E = mri_->reg_end(); I != E; ++I) {
2452 MachineOperand &O = I.getOperand();
2453 MachineInstr *MI = O.getParent();
2454 if (SeenMIs.count(MI))
2455 continue;
2456 SeenMIs.insert(MI);
2457 unsigned Index = getInstructionIndex(MI);
2458 if (pli.liveAt(Index)) {
2459 vrm.addEmergencySpill(SpillReg, MI);
2460 unsigned StartIdx = getLoadIndex(Index);
2461 unsigned EndIdx = getStoreIndex(Index)+1;
2462 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2463 pli.removeRange(StartIdx, EndIdx);
2464 Cut = true;
2465 } else {
2466 std::string msg;
2467 raw_string_ostream Msg(msg);
2468 Msg << "Ran out of registers during register allocation!";
2469 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2470 Msg << "\nPlease check your inline asm statement for invalid "
2471 << "constraints:\n";
2472 MI->print(Msg, tm_);
2473 }
2474 llvm_report_error(Msg.str());
2475 }
2476 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2477 if (!hasInterval(*AS))
2478 continue;
2479 LiveInterval &spli = getInterval(*AS);
2480 if (spli.liveAt(Index))
2481 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2482 }
2483 }
2484 }
2485 return Cut;
2486 }
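// addLiveRangeToEndOfBlock - Extend reg's interval with a range covering
// [def slot of startInst, end of startInst's MBB). The new value number
// gets a kill in the block's terminator gap and hasPHIKill set, i.e. it
// is expected to be consumed by a PHI in a successor block.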
2488 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2489 MachineInstr* startInst) {
2490 LiveInterval& Interval = getOrCreateInterval(reg);
2491 VNInfo* VN = Interval.getNextValue(
2492 getInstructionIndex(startInst) + InstrSlots::DEF,
2493 startInst, true, getVNInfoAllocator());
2494 VN->setHasPHIKill(true);
2495 VN->kills.push_back(
2496 VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2497 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2498 getMBBEndIdx(startInst->getParent()) + 1, VN);
2499 Interval.addRange(LR);
2500 return LR;
2501 }