//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }
    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's which we want to keep are those that are
      // live out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
           UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
      }
    }
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
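
  // Illustrative note (editorial sketch; it assumes the InstrSlots layout
  // declared in LiveIntervalAnalysis.h: LOAD=0, USE=1, DEF=2, STORE=3,
  // NUM=4). With the numbering built above, a block containing two single-def
  // instructions plus a terminator is laid out roughly as:
  //
  //   [0,4)    empty slot at the start of the block
  //   4        first instruction    (LOAD=4, USE=5, DEF=6, STORE=7)
  //   [8,12)   empty slot(s) after it, max(1, numdefs) of them
  //   12       second instruction   (LOAD=12, USE=13, DEF=14, STORE=15)
  //   [16,20)  empty slot(s) after it
  //   [20,24)  terminator gap recorded in terminatorGaps[MBB]; PHI kills
  //            are pointed here
  //   24       the block terminator
  //
  // getBaseIndex/getDefIndex/getUseIndex pick out the sub-slots of an index.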
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert("Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);

            vni->kills[i] = getMBBEndIdx(I->second);*/
          }

          if (vni->kills[i].isPHIKill) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
            vni->kills[i].killIdx = terminatorGaps[I->second];
          } else {
            assert(OldI2MI[index] != 0 &&
                   "Kill refers to instruction not present in index maps.");
            vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
          }

          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                            (idx == index ? offset : 0);
          else
            vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getNameStr() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
         mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can also check uses.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
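
      // Illustrative example (editorial; register numbers are made up): the
      // three-address form "%reg1026 = ADD %reg1024, %reg1025" is turned by
      // the two-address pass into
      //   %reg1026 = MOV %reg1024
      //   %reg1026 = ADD %reg1026, %reg1025
      // so %reg1026 now has two defs. The single value number computed for
      // the copy has to be split in two: one value for the copy's def and a
      // new one starting at the redefining ADD, which is what happens below.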
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
    }
  }
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx, MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    }

    int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
    if (DefIdx != -1) {
      if (mi->isRegTiedToUseOperand(DefIdx)) {
        // Two-address instruction.
        end = getDefIndex(baseIndex);
        if (mi->getOperand(DefIdx).isEarlyClobber())
          end = getUseIndex(baseIndex);
      } else {
        // Another instruction redefines the register before it is ever read.
        // Then the register is essentially dead at the instruction that
        // defines it. Hence its interval is:
        // [defSlot(def), defSlot(def)+1)
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
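
// Illustrative example (editorial; indices use the InstrSlots layout noted in
// computeNumbering): if EAX is defined by the instruction at base index 4 and
// killed by the instruction at base index 12, the range added above is
// [getDefIndex(4), getUseIndex(12) + 1) = [6, 14), entirely within one block.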
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and for
/// which a variable is live.
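///
/// Illustrative example (editorial; indices use the InstrSlots layout noted
/// in computeNumbering above): if %reg1024 is defined by the instruction at
/// base index 8 and last read by the instruction at base index 20, its live
/// interval is [getDefIndex(8), getUseIndex(20) + 1) = [10, 22).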
void LiveIntervals::computeIntervals() {

  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (Reg != li.reg) {
        if (ImpUse == 0)
          ImpUse = Reg;
        else if (Reg != ImpUse)
          return false;
      }
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
         re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
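
// Illustrative note (editorial): the classic trivially re-materializable def
// is one with no side effects whose inputs cannot change before the reload
// point, e.g. materializing an immediate ("%reg1024 = MOV32ri 42" on x86) or
// a load from an immutable (fixed) stack slot. Anything that may read memory
// that could be written, that uses a physical register, or that has unmodeled
// side effects is rejected by the checks above.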
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // value.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        } else {
          // We'll try to fold it later if it's profitable.
          CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
        }
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
  }
  return CanFold;
}
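
// Illustrative note (editorial; sub-slot offsets as in computeNumbering): for
// a use of the spilled vreg at base index i, the reload interval created above
// is [getLoadIndex(i), getUseIndex(i)+1) = [i, i+2); for a def it is
// [getDefIndex(i), getStoreIndex(i)) = [i+2, i+3), just wide enough to cover
// the reload before the use and the store after the def.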
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                     MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  unsigned Index;
  MachineInstr *MI;
  bool HasUse;
  bool HasDef;
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
};

struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
  }
};
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
       re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
1841 // Keep track of the last def and first use in each MBB.
1843 if (MI != ReMatOrigDefMI || !CanDelete) {
1844 bool HasKill = false;
1846 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1848 // If this is two-address code, then this index starts a new VNInfo.
1849 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1851 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1853 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1854 SpillIdxes.find(MBBId);
1856 if (SII == SpillIdxes.end()) {
1857 std::vector<SRInfo> S;
1858 S.push_back(SRInfo(index, NewVReg, true));
1859 SpillIdxes.insert(std::make_pair(MBBId, S));
1860 } else if (SII->second.back().vreg != NewVReg) {
1861 SII->second.push_back(SRInfo(index, NewVReg, true));
1862 } else if ((int)index > SII->second.back().index) {
1863 // If there is an earlier def and this is a two-address
1864 // instruction, then it's not possible to fold the store (which
1865 // would also fold the load).
1866 SRInfo &Info = SII->second.back();
1868 Info.canFold = !HasUse;
1870 SpillMBBs.set(MBBId);
1871 } else if (SII != SpillIdxes.end() &&
1872 SII->second.back().vreg == NewVReg &&
1873 (int)index > SII->second.back().index) {
1874 // There is an earlier def that's not killed (must be two-address).
1875 // The spill is no longer needed.
1876 SII->second.pop_back();
1877 if (SII->second.empty()) {
1878 SpillIdxes.erase(MBBId);
1879 SpillMBBs.reset(MBBId);
1886 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1887 SpillIdxes.find(MBBId);
1888 if (SII != SpillIdxes.end() &&
1889 SII->second.back().vreg == NewVReg &&
1890 (int)index > SII->second.back().index)
1891 // There are use(s) following the last def; it's not safe to fold the spill.
1892 SII->second.back().canFold = false;
1893 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1894 RestoreIdxes.find(MBBId);
1895 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1896 // If we are splitting live intervals, only fold if it's the first
1897 // use and there isn't another use later in the MBB.
1898 RII->second.back().canFold = false;
1900 // Only need a reload if there isn't an earlier def / use.
1901 if (RII == RestoreIdxes.end()) {
1902 std::vector<SRInfo> Infos;
1903 Infos.push_back(SRInfo(index, NewVReg, true));
1904 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1906 RII->second.push_back(SRInfo(index, NewVReg, true));
1908 RestoreMBBs.set(MBBId);
1912 // Update spill weight.
1913 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1914 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1917 if (NewVReg && TrySplit && AllCanFold) {
1918 // If all of its def / use can be folded, give it a low spill weight.
1919 LiveInterval &nI = getOrCreateInterval(NewVReg);
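/// alsoFoldARestore - Return true if block Id records a foldable restore of
/// register vr at the given instruction index.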
1924 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1925 BitVector &RestoreMBBs,
1926 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1927 if (!RestoreMBBs[Id])
1929 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1930 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1931 if (Restores[i].index == index &&
1932 Restores[i].vreg == vr &&
1933 Restores[i].canFold)
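/// eraseRestoreInfo - Invalidate any restore of register vr recorded at the
/// given index in block Id, once the reload has been folded away.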
1938 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1939 BitVector &RestoreMBBs,
1940 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1941 if (!RestoreMBBs[Id])
1943 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1944 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1945 if (Restores[i].index == index && Restores[i].vreg == vr)
1946 Restores[i].index = -1;
1949 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1950 /// spilled and create empty intervals for their uses.
1952 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1953 const TargetRegisterClass* rc,
1954 std::vector<LiveInterval*> &NewLIs) {
1955 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1956 re = mri_->reg_end(); ri != re; ) {
1957 MachineOperand &O = ri.getOperand();
1958 MachineInstr *MI = &*ri;
1961 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1962 "Register def was not rewritten?");
1963 RemoveMachineInstrFromMaps(MI);
1964 vrm.RemoveMachineInstrFromMaps(MI);
1965 MI->eraseFromParent();
1967 // This must be a use of an implicit_def so it's not part of the live
1968 // interval. Create a new empty live interval for it.
1969 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1970 unsigned NewVReg = mri_->createVirtualRegister(rc);
1972 vrm.setIsImplicitlyDefined(NewVReg);
1973 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1974 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1975 MachineOperand &MO = MI->getOperand(i);
1976 if (MO.isReg() && MO.getReg() == li.reg) {
1985 std::vector<LiveInterval*> LiveIntervals::
1986 addIntervalsForSpillsFast(const LiveInterval &li,
1987 const MachineLoopInfo *loopInfo,
VirtRegMap &vrm) {
1989 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1991 std::vector<LiveInterval*> added;
1993 assert(li.weight != HUGE_VALF &&
1994 "attempt to spill already spilled interval!");
1996 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2000 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2002 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
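// Walk every use / def of the register: either fold the memory operand into
// the instruction or give it a fresh vreg with a short interval around the
// reload / spill.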
2003 while (RI != mri_->reg_end()) {
2004 MachineInstr* MI = &*RI;
2006 SmallVector<unsigned, 2> Indices;
2007 bool HasUse = false;
2008 bool HasDef = false;
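// Collect the operand indices that refer to the spilled register and note
// whether the instruction reads and/or writes it.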
2010 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2011 MachineOperand& mop = MI->getOperand(i);
2012 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2014 HasUse |= MI->getOperand(i).isUse();
2015 HasDef |= MI->getOperand(i).isDef();
2017 Indices.push_back(i);
2020 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2021 Indices, true, slot, li.reg)) {
2022 unsigned NewVReg = mri_->createVirtualRegister(rc);
2024 vrm.assignVirt2StackSlot(NewVReg, slot);
2026 // create a new register for this spill
2027 LiveInterval &nI = getOrCreateInterval(NewVReg);
2029 // the spill weight is now infinity as it
2030 // cannot be spilled again
2031 nI.weight = HUGE_VALF;
2033 // Rewrite register operands to use the new vreg.
2034 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2035 E = Indices.end(); I != E; ++I) {
2036 MI->getOperand(*I).setReg(NewVReg);
2038 if (MI->getOperand(*I).isUse())
2039 MI->getOperand(*I).setIsKill(true);
2042 // Fill in the new live interval.
2043 unsigned index = getInstructionIndex(MI);
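// Reload before the use: the new interval covers the load slot up to the
// use slot of this instruction.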
2045 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2046 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2049 vrm.addRestorePoint(NewVReg, MI);
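// Spill after the def: the new interval covers the def slot up to the
// store slot of this instruction.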
2052 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2053 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2056 vrm.addSpillPoint(NewVReg, true, MI);
2059 added.push_back(&nI);
2061 DOUT << "\t\t\t\tadded new interval: ";
2067 RI = mri_->reg_begin(li.reg);
2073 std::vector<LiveInterval*> LiveIntervals::
2074 addIntervalsForSpills(const LiveInterval &li,
2075 SmallVectorImpl<LiveInterval*> &SpillIs,
2076 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2078 if (EnableFastSpilling)
2079 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2081 assert(li.weight != HUGE_VALF &&
2082 "attempt to spill already spilled interval!");
2084 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2085 li.print(DOUT, tri_);
2088 // Each bit specifies whether a spill is required in the MBB.
2089 BitVector SpillMBBs(mf_->getNumBlockIDs());
2090 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2091 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2092 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2093 DenseMap<unsigned,unsigned> MBBVRegsMap;
2094 std::vector<LiveInterval*> NewLIs;
2095 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2097 unsigned NumValNums = li.getNumValNums();
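// Per-val# rematerialization state: the cloned def used for remat, the
// original def, the assigned remat ids, and which original defs can be
// deleted.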
2098 SmallVector<MachineInstr*, 4> ReMatDefs;
2099 ReMatDefs.resize(NumValNums, NULL);
2100 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2101 ReMatOrigDefs.resize(NumValNums, NULL);
2102 SmallVector<int, 4> ReMatIds;
2103 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2104 BitVector ReMatDelete(NumValNums);
2105 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2107 // Spilling a split live interval. It cannot be split any further. Also,
2108 // it's guaranteed to be a single val# / range interval.
2109 if (vrm.getPreSplitReg(li.reg)) {
2110 vrm.setIsSplitFromReg(li.reg, 0);
2111 // Unset the split kill marker on the last use.
2112 unsigned KillIdx = vrm.getKillPoint(li.reg);
2114 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2115 assert(KillMI && "Last use disappeared?");
2116 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2117 assert(KillOp != -1 && "Last use disappeared?");
2118 KillMI->getOperand(KillOp).setIsKill(false);
2120 vrm.removeKillPoint(li.reg);
2121 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2122 Slot = vrm.getStackSlot(li.reg);
2123 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2124 MachineInstr *ReMatDefMI = DefIsReMat ?
2125 vrm.getReMaterializedMI(li.reg) : NULL;
2127 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2128 bool isLoad = isLoadSS ||
2129 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2130 bool IsFirstRange = true;
2131 for (LiveInterval::Ranges::const_iterator
2132 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2133 // If this is a split live interval with multiple ranges, it means there
2134 // are two-address instructions that re-defined the value. Only the
2135 // first def can be rematerialized!
2137 // Note ReMatOrigDefMI has already been deleted.
2138 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2139 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2140 false, vrm, rc, ReMatIds, loopInfo,
2141 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2142 MBBVRegsMap, NewLIs);
2144 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2145 Slot, 0, false, false, false,
2146 false, vrm, rc, ReMatIds, loopInfo,
2147 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2148 MBBVRegsMap, NewLIs);
2150 IsFirstRange = false;
2153 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2157 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2158 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2162 bool NeedStackSlot = false;
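// First pass over the value numbers: determine which defs are
// rematerializable and whether a stack slot is still needed.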
2163 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2165 const VNInfo *VNI = *i;
2166 unsigned VN = VNI->id;
2167 if (VNI->isUnused())
2168 continue; // Dead val#.
2169 // Is the def for the val# rematerializable?
2170 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2171 ? getInstructionFromIndex(VNI->def) : 0;
2173 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2174 // Remember how to remat the def of this val#.
2175 ReMatOrigDefs[VN] = ReMatDefMI;
2176 // Original def may be modified so we have to make a copy here.
2177 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2178 ClonedMIs.push_back(Clone);
2179 ReMatDefs[VN] = Clone;
2181 bool CanDelete = true;
2182 if (VNI->hasPHIKill()) {
2183 // A kill is a phi node; not all of its uses can be rematerialized.
2184 // It must not be deleted.
2186 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2188 NeedStackSlot = true;
2191 ReMatDelete.set(VN);
2193 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2195 NeedStackSlot = true;
2199 // One stack slot per live interval.
2200 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2201 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2202 Slot = vrm.assignVirt2StackSlot(li.reg);
2204 // This case only occurs when the prealloc splitter has already assigned
2205 // a stack slot to this vreg.
2207 Slot = vrm.getStackSlot(li.reg);
2210 // Create new intervals and rewrite defs and uses.
2211 for (LiveInterval::Ranges::const_iterator
2212 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2213 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2214 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2215 bool DefIsReMat = ReMatDefMI != NULL;
2216 bool CanDelete = ReMatDelete[I->valno->id];
2218 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2219 bool isLoad = isLoadSS ||
2220 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2221 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2222 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2223 CanDelete, vrm, rc, ReMatIds, loopInfo,
2224 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2225 MBBVRegsMap, NewLIs);
2228 // Insert spills / restores if we are splitting.
2230 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2234 SmallPtrSet<LiveInterval*, 4> AddedKill;
2235 SmallVector<unsigned, 2> Ops;
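// Issue the spills recorded above: try to fold the store into the defining
// instruction; if that fails, record a spill point for the spiller.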
2236 if (NeedStackSlot) {
2237 int Id = SpillMBBs.find_first();
2239 std::vector<SRInfo> &spills = SpillIdxes[Id];
2240 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2241 int index = spills[i].index;
2242 unsigned VReg = spills[i].vreg;
2243 LiveInterval &nI = getOrCreateInterval(VReg);
2244 bool isReMat = vrm.isReMaterialized(VReg);
2245 MachineInstr *MI = getInstructionFromIndex(index);
2246 bool CanFold = false;
2247 bool FoundUse = false;
2249 if (spills[i].canFold) {
2251 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2252 MachineOperand &MO = MI->getOperand(j);
2253 if (!MO.isReg() || MO.getReg() != VReg)
2260 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2261 RestoreMBBs, RestoreIdxes))) {
2262 // MI has two-address uses of the same register. If the use
2263 // isn't the first and only use in the BB, then we can't fold
2264 // it. FIXME: Move this to rewriteInstructionsForSpills.
2271 // Fold the store into the def if possible.
2272 bool Folded = false;
2273 if (CanFold && !Ops.empty()) {
2274 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2277 // The uses were folded as well, so do not issue a load.
2278 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2279 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2281 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2285 // Otherwise tell the spiller to issue a spill.
2287 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2288 bool isKill = LR->end == getStoreIndex(index);
2289 if (!MI->registerDefIsDead(nI.reg))
2290 // No need to spill a dead def.
2291 vrm.addSpillPoint(VReg, isKill, MI);
2293 AddedKill.insert(&nI);
2296 Id = SpillMBBs.find_next(Id);
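// Issue the restores: fold the reload (or the rematerialized load) into the
// using instruction where possible, otherwise record a restore point.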
2300 int Id = RestoreMBBs.find_first();
2302 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2303 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2304 int index = restores[i].index;
2307 unsigned VReg = restores[i].vreg;
2308 LiveInterval &nI = getOrCreateInterval(VReg);
2309 bool isReMat = vrm.isReMaterialized(VReg);
2310 MachineInstr *MI = getInstructionFromIndex(index);
2311 bool CanFold = false;
2313 if (restores[i].canFold) {
2315 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2316 MachineOperand &MO = MI->getOperand(j);
2317 if (!MO.isReg() || MO.getReg() != VReg)
2321 // If this restore were to be folded, it would have been folded already.
2330 // Fold the load into the use if possible.
2331 bool Folded = false;
2332 if (CanFold && !Ops.empty()) {
2334 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2336 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2338 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2339 // If the rematerializable def is a load, also try to fold it.
2340 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2341 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2342 Ops, isLoadSS, LdSlot, VReg);
2344 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2346 // Re-matting an instruction with virtual register use. Add the
2347 // register as an implicit use on the use MI and update the register
2348 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2350 LiveInterval &ImpLi = getInterval(ImpUse);
2351 ImpLi.weight = HUGE_VALF;
2352 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2357 // If folding is not possible / failed, then tell the spiller to issue a
2358 // load / rematerialization for us.
2360 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2362 vrm.addRestorePoint(VReg, MI);
2364 Id = RestoreMBBs.find_next(Id);
2367 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2369 std::vector<LiveInterval*> RetNewLIs;
2370 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2371 LiveInterval *LI = NewLIs[i];
2373 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2374 if (!AddedKill.count(LI)) {
2375 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2376 unsigned LastUseIdx = getBaseIndex(LR->end);
2377 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2378 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2379 assert(UseIdx != -1);
2380 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2381 LastUse->getOperand(UseIdx).setIsKill();
2382 vrm.addKillPoint(LI->reg, LastUseIdx);
2385 RetNewLIs.push_back(LI);
2389 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2393 /// hasAllocatableSuperReg - Return true if the specified physical register has
2394 /// any super register that's allocatable.
2395 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2396 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2397 if (allocatableRegs_[*AS] && hasInterval(*AS))
2402 /// getRepresentativeReg - Find the largest super register of the specified
2403 /// physical register.
2404 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2405 // Find the largest super-register that is allocatable.
2406 unsigned BestReg = Reg;
2407 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2408 unsigned SuperReg = *AS;
2409 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2417 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2418 /// specified interval that conflict with the specified physical register.
2419 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2420 unsigned PhysReg) const {
2421 unsigned NumConflicts = 0;
2422 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2423 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2424 E = mri_->reg_end(); I != E; ++I) {
2425 MachineOperand &O = I.getOperand();
2426 MachineInstr *MI = O.getParent();
2427 unsigned Index = getInstructionIndex(MI);
2428 if (pli.liveAt(Index))
2431 return NumConflicts;
2434 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2435 /// around all defs and uses of the specified interval. Return true if it
2436 /// was able to cut its interval.
2437 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2438 unsigned PhysReg, VirtRegMap &vrm) {
2439 unsigned SpillReg = getRepresentativeReg(PhysReg);
2441 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2442 // If there is a register that aliases PhysReg but is not a sub-register
2443 // of the chosen representative super register, assert, since we can't
2444 // handle it yet.
2445 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2446 tri_->isSuperRegister(*AS, SpillReg));
2449 LiveInterval &pli = getInterval(SpillReg);
2450 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2451 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2452 E = mri_->reg_end(); I != E; ++I) {
2453 MachineOperand &O = I.getOperand();
2454 MachineInstr *MI = O.getParent();
2455 if (SeenMIs.count(MI))
2458 unsigned Index = getInstructionIndex(MI);
2459 if (pli.liveAt(Index)) {
2460 vrm.addEmergencySpill(SpillReg, MI);
2461 unsigned StartIdx = getLoadIndex(Index);
2462 unsigned EndIdx = getStoreIndex(Index)+1;
2463 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2464 pli.removeRange(StartIdx, EndIdx);
2468 raw_string_ostream Msg(msg);
2469 Msg << "Ran out of registers during register allocation!";
2470 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2471 Msg << "\nPlease check your inline asm statement for invalid "
2472 << "constraints:\n";
2473 MI->print(Msg, tm_);
2475 llvm_report_error(Msg.str());
2477 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2478 if (!hasInterval(*AS))
2480 LiveInterval &spli = getInterval(*AS);
2481 if (spli.liveAt(Index))
2482 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
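/// addLiveRangeToEndOfBlock - Create a new value defined by startInst and a
/// live range extending from startInst to the end of its basic block.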
2489 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2490 MachineInstr* startInst) {
2491 LiveInterval& Interval = getOrCreateInterval(reg);
2492 VNInfo* VN = Interval.getNextValue(
2493 getInstructionIndex(startInst) + InstrSlots::DEF,
2494 startInst, true, getVNInfoAllocator());
2495 VN->setHasPHIKill(true);
2496 VN->kills.push_back(
2497 VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2498 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2499 getMBBEndIdx(startInst->getParent()) + 1, VN);
2500 Interval.addRange(LR);
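// IntervalPrefixPrinter - prefix each machine instruction with its
// instruction index when printing.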
2506 IntervalPrefixPrinter::operator()(raw_ostream &out,
2507 const MachineInstr &instr) const {
2508 return out << liinfo.getInstructionIndex(&instr);