//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
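///
/// Illustrative sketch (register numbers are made up): given
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = COPY %reg1024
/// the copy reads an undefined value, so it is itself rewritten into
///   %reg1025<def> = IMPLICIT_DEF
/// and the remaining implicit_def defs and uses are marked <undef>.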
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        MI->getOperand(0).setIsUndef();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        // Physical registers are not liveout (yet).
        continue;
      if (!ImpDefRegs.count(Reg))
        continue;

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MI->getParent())
          continue;
        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        MachineInstrBuilder MIB =
          BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
                  tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
        (*MIB).getOperand(0).setIsUndef();
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
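  //
  // Illustrative layout of the numbering (assuming the conventional
  // InstrSlots values LOAD=0, USE=1, DEF=2, STORE=3, NUM=4 declared in
  // LiveIntervalAnalysis.h):
  //   0   empty slot at the start of the block
  //   4   first instruction   (load 4, use 5, def 6, store 7)
  //   8   empty slot(s) left for spill / restore code inserted later
  //   12  second instruction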
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // * scale MBB begin and end points
  // * scale all ranges.
  // * Update VNI structures.
  // * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
         TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = MI->first;
  }
}
/// runOnMachineFunction - Compute live intervals for the entire function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR << "\n";
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(mbb->getNumber(), 0, false,
                                           VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction is if it is live-in to the function and never
  // used. Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = getDefIndex(start) + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
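///
/// For example (an illustrative sketch; the actual indices depend on the
/// numbering computed by computeNumbering above):
///   4:  %reg1024 = MOV32ri 7
///   12: %reg1025 = ADD32rr %reg1024, %reg1026
/// If the ADD is the last use of %reg1024, %reg1024 gets the interval
/// [6, 14): from the MOV's def slot (4 + DEF) to one past the ADD's use
/// slot (12 + USE + 1).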
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle register defs - build intervals.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In release builds stop at the first match; in asserts builds keep
    // scanning so the assert above can catch a second register operand.
    break;
#endif
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  // FIXME: For now, avoid remating instructions whose definition has a subreg
  // index. It's just incredibly difficult to get right.
  if (MI->findRegisterDefOperand(li.reg)->getSubReg())
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
      return false;

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
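/// (Illustrative examples only: a load from an immutable stack slot or a
/// MOV32ri of a constant is typically safe to re-materialize, while a load
/// from memory that may be written to is not.)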
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill or a restore from
/// a stack slot, or a rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding a reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI;
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);

          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1703 void LiveIntervals::
1704 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1705 LiveInterval::Ranges::const_iterator &I,
1706 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1707 unsigned Slot, int LdSlot,
1708 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1710 const TargetRegisterClass* rc,
1711 SmallVector<int, 4> &ReMatIds,
1712 const MachineLoopInfo *loopInfo,
1713 BitVector &SpillMBBs,
1714 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1715 BitVector &RestoreMBBs,
1716 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1717 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1718 std::vector<LiveInterval*> &NewLIs) {
1719 bool AllCanFold = true;
1720 unsigned NewVReg = 0;
1721 unsigned start = getBaseIndex(I->start);
1722 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1724 // First collect all the def / use in this live range that will be rewritten.
1725 // Make sure they are sorted according to instruction index.
1726 std::vector<RewriteInfo> RewriteMIs;
1727 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1728 re = mri_->reg_end(); ri != re; ) {
1729 MachineInstr *MI = &*ri;
1730 MachineOperand &O = ri.getOperand();
1732 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1733 unsigned index = getInstructionIndex(MI);
1734 if (index < start || index >= end)
1736 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1737 // Must be defined by an implicit def. It should not be spilled. Note,
1738 // this is for correctness reason. e.g.
1739 // 8 %reg1024<def> = IMPLICIT_DEF
1740 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1741 // The live range [12, 14) are not part of the r1024 live interval since
1742 // it's defined by an implicit def. It will not conflicts with live
1743 // interval of r1025. Now suppose both registers are spilled, you can
1744 // easily see a situation where both registers are reloaded before
1745 // the INSERT_SUBREG and both target registers that would overlap.
1747 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1749 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1751 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1752 // Now rewrite the defs and uses.
1753 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1754 RewriteInfo &rwi = RewriteMIs[i];
1756 unsigned index = rwi.Index;
1757 bool MIHasUse = rwi.HasUse;
1758 bool MIHasDef = rwi.HasDef;
1759 MachineInstr *MI = rwi.MI;
1760 // If MI def and/or use the same register multiple times, then there
1761 // are multiple entries.
1762 unsigned NumUses = MIHasUse;
1763 while (i != e && RewriteMIs[i].MI == MI) {
1764 assert(RewriteMIs[i].Index == index);
1765 bool isUse = RewriteMIs[i].HasUse;
1766 if (isUse) ++NumUses;
1768 MIHasDef |= RewriteMIs[i].HasDef;
1771 MachineBasicBlock *MBB = MI->getParent();
1773 if (ImpUse && MI != ReMatDefMI) {
1774 // Re-matting an instruction with virtual register use. Update the
1775 // register interval's spill weight to HUGE_VALF to prevent it from
1777 LiveInterval &ImpLi = getInterval(ImpUse);
1778 ImpLi.weight = HUGE_VALF;
1781 unsigned MBBId = MBB->getNumber();
1782 unsigned ThisVReg = 0;
1784 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1785 if (NVI != MBBVRegsMap.end()) {
1786 ThisVReg = NVI->second;
1793 // It's better to start a new interval to avoid artifically
1794 // extend the new interval.
1795 if (MIHasDef && !MIHasUse) {
1796 MBBVRegsMap.erase(MBB->getNumber());

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // could be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update the weight of the spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
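    // From here on we are splitting: SpillIdxes[MBB] tracks the last def of
    // each new vreg in the block (a candidate spill point) and
    // RestoreIdxes[MBB] the first use (a candidate restore point); the
    // canFold flags record whether the store / load could be folded into
    // the instruction itself.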
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

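    // Note: the getSpillWeight heuristic scales roughly as
    // (HasDef + HasUse) * 10^loopDepth, so a def plus use at loop depth 2
    // would add about 2 * 100 = 200 to the new interval's weight.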
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses could be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
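
/// alsoFoldARestore - Return true if the restore recorded for vreg vr at
/// the given instruction index in block Id is still marked as foldable.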
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
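
/// eraseRestoreInfo - Invalidate the restore recorded for vreg vr at the
/// given instruction index in block Id so that no reload is issued for it.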
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
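
/// addIntervalsForSpillsFast - Simple spill policy used on the -fast-spill
/// path: no splitting or rematerialization; each def / use of the spilled
/// register that cannot be folded gets its own short interval around a
/// store / reload through a single shared stack slot.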
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
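
/// addIntervalsForSpills - Create new live intervals for the spilled defs
/// and uses of the given interval, attempting to fold memory operands,
/// rematerialize defs, and (where profitable) split the interval at basic
/// block boundaries.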
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
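  // The ReMat* containers are indexed by val# id: ReMatOrigDefs holds the
  // original defining instruction, ReMatDefs a clone that survives deletion
  // of the original, ReMatIds the remat id later assigned through the
  // VirtRegMap, and ReMatDelete marks values whose defs can be removed.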
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
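
  // Splitting at basic block boundaries gives each block its own new vreg,
  // letting the allocator keep the value in a register in some blocks while
  // spilling it in others, instead of spilling the entire interval.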

  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // One of this val#'s kills is a PHI node, so not all of its uses can
        // be rematerialized. The def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // If we are not splitting, we are done; otherwise insert the spills and
  // restores recorded above.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops,
                                        true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
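/// For example, on X86 the representative of AX would typically be EAX: a
/// super-register that has a live interval but no allocatable super-register
/// of its own. (Illustrative; the exact choice depends on the target's
/// register hierarchy.)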
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
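
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, add a live range from that instruction to the end of its MBB.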
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(
    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}