1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
// Hidden options to help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
53 cl::init(true), cl::Hidden);
54 static cl::opt<int> SplitLimit("split-limit",
55 cl::init(-1), cl::Hidden);
57 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
59 static cl::opt<bool> EnableFastSpilling("fast-spill",
60 cl::init(false), cl::Hidden);
62 STATISTIC(numIntervals, "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
66 char LiveIntervals::ID = 0;
67 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
69 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
71 AU.addRequired<AliasAnalysis>();
72 AU.addPreserved<AliasAnalysis>();
73 AU.addPreserved<LiveVariables>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreservedID(MachineLoopInfoID);
76 AU.addPreservedID(MachineDominatorsID);
79 AU.addPreservedID(PHIEliminationID);
80 AU.addRequiredID(PHIEliminationID);
83 AU.addRequiredID(TwoAddressInstructionPassID);
84 MachineFunctionPass::getAnalysisUsage(AU);
87 void LiveIntervals::releaseMemory() {
88 // Free the live intervals themselves.
89 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
90 E = r2iMap_.end(); I != E; ++I)
98 terminatorGaps.clear();
// Release VNInfo memory regions after all VNInfo objects are dtor'd.
101 VNInfoAllocator.Reset();
102 while (!ClonedMIs.empty()) {
103 MachineInstr *MI = ClonedMIs.back();
104 ClonedMIs.pop_back();
105 mf_->DeleteMachineInstr(MI);
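/// CanTurnIntoImplicitDef - Return true if MI is a copy-like instruction
/// (register move, INSERT_SUBREG, SUBREG_TO_REG or EXTRACT_SUBREG) whose
/// source is Reg, so that once Reg is known to be implicitly defined the
/// instruction itself can be rewritten as an IMPLICIT_DEF.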
109 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
110 const TargetInstrInfo *tii_) {
111 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
112 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
116 if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
117 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
118 MI->getOperand(2).getReg() == Reg)
120 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
121 MI->getOperand(1).getReg() == Reg)
126 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
127 /// there is one implicit_def for each use. Add isUndef marker to
128 /// implicit_def defs and their uses.
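/// For example (illustrative only), given
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024
/// the copy is itself turned into an IMPLICIT_DEF of %reg1025, and any
/// remaining uses of %reg1024 are marked isUndef.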
129 void LiveIntervals::processImplicitDefs() {
130 SmallSet<unsigned, 8> ImpDefRegs;
131 SmallVector<MachineInstr*, 8> ImpDefMIs;
132 MachineBasicBlock *Entry = mf_->begin();
133 SmallPtrSet<MachineBasicBlock*,16> Visited;
134 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
135 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
137 MachineBasicBlock *MBB = *DFI;
138 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
140 MachineInstr *MI = &*I;
142 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
143 unsigned Reg = MI->getOperand(0).getReg();
144 ImpDefRegs.insert(Reg);
145 ImpDefMIs.push_back(MI);
149 bool ChangedToImpDef = false;
150 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
151 MachineOperand& MO = MI->getOperand(i);
152 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
154 unsigned Reg = MO.getReg();
157 if (!ImpDefRegs.count(Reg))
159 // Use is a copy, just turn it into an implicit_def.
160 if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
161 bool isKill = MO.isKill();
162 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
163 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
164 MI->RemoveOperand(j);
166 ImpDefRegs.erase(Reg);
167 ChangedToImpDef = true;
172 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
173 // Make sure other uses of
174 for (unsigned j = i+1; j != e; ++j) {
175 MachineOperand &MOJ = MI->getOperand(j);
176 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
179 ImpDefRegs.erase(Reg);
183 if (ChangedToImpDef) {
184 // Backtrack to process this new implicit_def.
187 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
188 MachineOperand& MO = MI->getOperand(i);
189 if (!MO.isReg() || !MO.isDef())
191 ImpDefRegs.erase(MO.getReg());
196 // Any outstanding liveout implicit_def's?
197 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
198 MachineInstr *MI = ImpDefMIs[i];
199 unsigned Reg = MI->getOperand(0).getReg();
200 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
201 !ImpDefRegs.count(Reg)) {
// Delete all "local" implicit_def's. That includes those which define
// physical registers, since they cannot be live-out.
204 MI->eraseFromParent();
208 // If there are multiple defs of the same register and at least one
// is not an implicit_def, do not insert implicit_def's before the
// uses.
212 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
213 DE = mri_->def_end(); DI != DE; ++DI) {
214 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
// The only implicit_defs we want to keep are those that are live
// out of their blocks.
224 MI->eraseFromParent();
226 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
227 UE = mri_->use_end(); UI != UE; ) {
228 MachineOperand &RMO = UI.getOperand();
229 MachineInstr *RMI = &*UI;
231 MachineBasicBlock *RMBB = RMI->getParent();
235 // Turn a copy use into an implicit_def.
236 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
237 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
239 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
240 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
241 RMI->RemoveOperand(j);
245 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
246 unsigned NewVReg = mri_->createVirtualRegister(RC);
257 void LiveIntervals::computeNumbering() {
258 Index2MiMap OldI2MI = i2miMap_;
259 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
265 terminatorGaps.clear();
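// Each MachineInstr is assigned a block of InstrSlots::NUM consecutive
// index values; the helpers getLoadIndex / getUseIndex / getDefIndex /
// getStoreIndex select sub-slots within that block (e.g. if InstrSlots::NUM
// is 4, the instruction at base index 8 owns slots 8-11 and the next
// instruction starts at 12). The extra empty slots inserted below leave
// room for instructions added later (e.g. spill and restore code) and for
// the terminator gaps that PHI kills are pointed at.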
269 // Number MachineInstrs and MachineBasicBlocks.
// Initialize MBB indexes to a sentinel.
271 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
273 unsigned MIIndex = 0;
274 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
276 unsigned StartIdx = MIIndex;
278 // Insert an empty slot at the beginning of each block.
279 MIIndex += InstrSlots::NUM;
280 i2miMap_.push_back(0);
282 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
285 if (I == MBB->getFirstTerminator()) {
// Leave a gap before terminators; this is where we will point PHI kills.
bool inserted =
terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
assert(inserted &&
"Multiple 'first' terminators encountered during numbering.");
inserted = inserted; // Avoid compiler warning if assertions turned off.
293 i2miMap_.push_back(0);
295 MIIndex += InstrSlots::NUM;
298 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
299 assert(inserted && "multiple MachineInstr -> index mappings");
301 i2miMap_.push_back(I);
302 MIIndex += InstrSlots::NUM;
305 // Insert max(1, numdefs) empty slots after every instruction.
306 unsigned Slots = I->getDesc().getNumDefs();
309 MIIndex += InstrSlots::NUM * Slots;
311 i2miMap_.push_back(0);
314 if (MBB->getFirstTerminator() == MBB->end()) {
// Leave a gap before terminators; this is where we will point PHI kills.
bool inserted =
terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
assert(inserted &&
"Multiple 'first' terminators encountered during numbering.");
inserted = inserted; // Avoid compiler warning if assertions turned off.
322 i2miMap_.push_back(0);
324 MIIndex += InstrSlots::NUM;
327 // Set the MBB2IdxMap entry for this MBB.
328 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
329 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
332 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
334 if (!OldI2MI.empty())
335 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
336 for (LiveInterval::iterator LI = OI->second->begin(),
337 LE = OI->second->end(); LI != LE; ++LI) {
339 // Remap the start index of the live range to the corresponding new
340 // number, or our best guess at what it _should_ correspond to if the
341 // original instruction has been erased. This is either the following
342 // instruction or its predecessor.
343 unsigned index = LI->start / InstrSlots::NUM;
344 unsigned offset = LI->start % InstrSlots::NUM;
345 if (offset == InstrSlots::LOAD) {
346 std::vector<IdxMBBPair>::const_iterator I =
347 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
348 // Take the pair containing the index
349 std::vector<IdxMBBPair>::const_iterator J =
350 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
352 LI->start = getMBBStartIdx(J->second);
354 LI->start = mi2iMap_[OldI2MI[index]] + offset;
357 // Remap the ending index in the same way that we remapped the start,
358 // except for the final step where we always map to the immediately
359 // following instruction.
360 index = (LI->end - 1) / InstrSlots::NUM;
361 offset = LI->end % InstrSlots::NUM;
362 if (offset == InstrSlots::LOAD) {
363 // VReg dies at end of block.
364 std::vector<IdxMBBPair>::const_iterator I =
365 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
368 LI->end = getMBBEndIdx(I->second) + 1;
370 unsigned idx = index;
371 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
373 if (index != OldI2MI.size())
374 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
376 LI->end = InstrSlots::NUM * i2miMap_.size();
380 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
381 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
384 // Remap the VNInfo def index, which works the same as the
385 // start indices above. VN's with special sentinel defs
386 // don't need to be remapped.
387 if (vni->isDefAccurate() && !vni->isUnused()) {
388 unsigned index = vni->def / InstrSlots::NUM;
389 unsigned offset = vni->def % InstrSlots::NUM;
390 if (offset == InstrSlots::LOAD) {
391 std::vector<IdxMBBPair>::const_iterator I =
392 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
393 // Take the pair containing the index
394 std::vector<IdxMBBPair>::const_iterator J =
395 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
397 vni->def = getMBBStartIdx(J->second);
399 vni->def = mi2iMap_[OldI2MI[index]] + offset;
403 // Remap the VNInfo kill indices, which works the same as
404 // the end indices above.
405 for (size_t i = 0; i < vni->kills.size(); ++i) {
406 unsigned killIdx = vni->kills[i].killIdx;
408 unsigned index = (killIdx - 1) / InstrSlots::NUM;
409 unsigned offset = killIdx % InstrSlots::NUM;
411 if (offset == InstrSlots::LOAD) {
assert(0 && "Value killed at a load slot.");
413 /*std::vector<IdxMBBPair>::const_iterator I =
414 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
417 vni->kills[i] = getMBBEndIdx(I->second);*/
419 if (vni->kills[i].isPHIKill) {
420 std::vector<IdxMBBPair>::const_iterator I =
421 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
423 vni->kills[i].killIdx = terminatorGaps[I->second];
425 assert(OldI2MI[index] != 0 &&
426 "Kill refers to instruction not present in index maps.");
427 vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
431 unsigned idx = index;
432 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
434 if (index != OldI2MI.size())
435 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
436 (idx == index ? offset : 0);
438 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
446 void LiveIntervals::scaleNumbering(int factor) {
448 // * scale MBB begin and end points
449 // * scale all ranges.
450 // * Update VNI structures.
451 // * Scale instruction numberings
453 // Scale the MBB indices.
455 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
456 MBB != MBBE; ++MBB) {
457 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
458 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
459 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
460 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
462 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
464 // Scale terminator gaps.
465 for (DenseMap<MachineBasicBlock*, unsigned>::iterator
466 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
468 terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
471 // Scale the intervals.
472 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
473 LI->second->scaleNumbering(factor);
476 // Scale MachineInstrs.
477 Mi2IndexMap oldmi2iMap = mi2iMap_;
478 unsigned highestSlot = 0;
479 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
481 unsigned newSlot = InstrSlots::scale(MI->second, factor);
482 mi2iMap_[MI->first] = newSlot;
483 highestSlot = std::max(highestSlot, newSlot);
487 i2miMap_.resize(highestSlot + 1);
488 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
490 i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
/// runOnMachineFunction - Compute live intervals for the whole function.
498 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
500 mri_ = &mf_->getRegInfo();
501 tm_ = &fn.getTarget();
502 tri_ = tm_->getRegisterInfo();
503 tii_ = tm_->getInstrInfo();
504 aa_ = &getAnalysis<AliasAnalysis>();
505 lv_ = &getAnalysis<LiveVariables>();
506 allocatableRegs_ = tri_->getAllocatableSet(fn);
508 processImplicitDefs();
512 numIntervals += getNumIntervals();
518 /// print - Implement the dump method.
519 void LiveIntervals::print(std::ostream &O, const Module* ) const {
520 raw_os_ostream OS(O);
521 OS << "********** INTERVALS **********\n";
522 for (const_iterator I = begin(), E = end(); I != E; ++I) {
523 I->second->print(OS, tri_);
527 OS << "********** MACHINEINSTRS **********\n";
529 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
530 mbbi != mbbe; ++mbbi) {
531 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
532 for (MachineBasicBlock::iterator mii = mbbi->begin(),
533 mie = mbbi->end(); mii != mie; ++mii) {
534 OS << getInstructionIndex(mii) << '\t' << *mii;
539 /// conflictsWithPhysRegDef - Returns true if the specified register
540 /// is defined during the duration of the specified interval.
541 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
542 VirtRegMap &vrm, unsigned reg) {
543 for (LiveInterval::Ranges::const_iterator
544 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
545 for (unsigned index = getBaseIndex(I->start),
546 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
547 index += InstrSlots::NUM) {
548 // skip deleted instructions
549 while (index != end && !getInstructionFromIndex(index))
550 index += InstrSlots::NUM;
551 if (index == end) break;
553 MachineInstr *MI = getInstructionFromIndex(index);
554 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
555 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
556 if (SrcReg == li.reg || DstReg == li.reg)
558 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
559 MachineOperand& mop = MI->getOperand(i);
562 unsigned PhysReg = mop.getReg();
563 if (PhysReg == 0 || PhysReg == li.reg)
565 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
566 if (!vrm.hasPhys(PhysReg))
568 PhysReg = vrm.getPhys(PhysReg);
570 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
581 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
582 unsigned Reg, bool CheckUse,
583 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
584 for (LiveInterval::Ranges::const_iterator
585 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
586 for (unsigned index = getBaseIndex(I->start),
587 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
588 index += InstrSlots::NUM) {
589 // Skip deleted instructions.
590 MachineInstr *MI = 0;
591 while (index != end) {
592 MI = getInstructionFromIndex(index);
595 index += InstrSlots::NUM;
597 if (index == end) break;
599 if (JoinedCopies.count(MI))
601 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
602 MachineOperand& MO = MI->getOperand(i);
605 if (MO.isUse() && !CheckUse)
607 unsigned PhysReg = MO.getReg();
608 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
610 if (tri_->isSubRegister(Reg, PhysReg))
620 void LiveIntervals::printRegName(unsigned reg) const {
621 if (TargetRegisterInfo::isPhysicalRegister(reg))
622 errs() << tri_->getName(reg);
624 errs() << "%reg" << reg;
627 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
628 MachineBasicBlock::iterator mi,
629 unsigned MIIdx, MachineOperand& MO,
631 LiveInterval &interval) {
633 errs() << "\t\tregister: ";
634 printRegName(interval.reg);
637 // Virtual registers may be defined multiple times (due to phi
638 // elimination and 2-addr elimination). Much of what we do only has to be
639 // done once for the vreg. We use an empty interval to detect the first
640 // time we see a vreg.
641 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
642 if (interval.empty()) {
// Get the Idx of the defining instruction.
644 unsigned defIndex = getDefIndex(MIIdx);
645 // Earlyclobbers move back one.
646 if (MO.isEarlyClobber())
647 defIndex = getUseIndex(MIIdx);
649 MachineInstr *CopyMI = NULL;
650 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
651 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
652 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
653 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
654 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
656 // Earlyclobbers move back one.
657 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
659 assert(ValNo->id == 0 && "First value in interval is not 0?");
661 // Loop over all of the blocks that the vreg is defined in. There are
662 // two cases we have to handle here. The most common case is a vreg
663 // whose lifetime is contained within a basic block. In this case there
664 // will be a single kill, in MBB, which comes after the definition.
665 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
666 // FIXME: what about dead vars?
668 if (vi.Kills[0] != mi)
669 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
671 killIdx = defIndex+1;
// If the kill happens after the definition, we have an intra-block
// live range.
675 if (killIdx > defIndex) {
676 assert(vi.AliveBlocks.empty() &&
677 "Shouldn't be alive across any blocks!");
678 LiveRange LR(defIndex, killIdx, ValNo);
679 interval.addRange(LR);
680 DEBUG(errs() << " +" << LR << "\n");
681 interval.addKill(ValNo, killIdx, false);
686 // The other case we handle is when a virtual register lives to the end
687 // of the defining block, potentially live across some blocks, then is
688 // live into some number of blocks, but gets killed. Start by adding a
689 // range that goes from this definition to the end of the defining block.
690 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
691 DEBUG(errs() << " +" << NewLR);
692 interval.addRange(NewLR);
// Iterate over all of the blocks that the variable is completely
// live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
697 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
698 E = vi.AliveBlocks.end(); I != E; ++I) {
699 LiveRange LR(getMBBStartIdx(*I),
700 getMBBEndIdx(*I)+1, // MBB ends at -1.
702 interval.addRange(LR);
703 DEBUG(errs() << " +" << LR);
706 // Finally, this virtual register is live from the start of any killing
707 // block to the 'use' slot of the killing instruction.
708 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
709 MachineInstr *Kill = vi.Kills[i];
710 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
711 LiveRange LR(getMBBStartIdx(Kill->getParent()),
713 interval.addRange(LR);
714 interval.addKill(ValNo, killIdx, false);
715 DEBUG(errs() << " +" << LR);
// If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two-address elimination. If this is
// the result of two-address elimination, then the vreg is one of the
// def-and-use register operands.
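// For example (hypothetical code): after two-address conversion of
//   %reg1024 = ADD %reg1025, %reg1026
// into
//   %reg1024 = MOV %reg1025
//   %reg1024 = ADD %reg1024, %reg1026
// the MOV defines value #0 of %reg1024 and the ADD redefines it, so the
// single range computed so far has to be split into two values.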
723 if (mi->isRegTiedToUseOperand(MOIdx)) {
724 // If this is a two-address definition, then we have already processed
725 // the live range. The only problem is that we didn't realize there
726 // are actually two values in the live interval. Because of this we
// need to take the LiveRange that defines this register and split it
// into two values.
729 assert(interval.containsOneValue());
730 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
731 unsigned RedefIndex = getDefIndex(MIIdx);
732 if (MO.isEarlyClobber())
733 RedefIndex = getUseIndex(MIIdx);
735 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
736 VNInfo *OldValNo = OldLR->valno;
738 // Delete the initial value, which should be short and continuous,
739 // because the 2-addr copy must be in the same MBB as the redef.
740 interval.removeRange(DefIndex, RedefIndex);
742 // Two-address vregs should always only be redefined once. This means
743 // that at this point, there should be exactly one value number in it.
744 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
748 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
749 false, // update at *
751 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
753 // Value#0 is now defined by the 2-addr instruction.
754 OldValNo->def = RedefIndex;
755 OldValNo->setCopy(0);
756 if (MO.isEarlyClobber())
757 OldValNo->setHasRedefByEC(true);
759 // Add the new live interval which replaces the range for the input copy.
760 LiveRange LR(DefIndex, RedefIndex, ValNo);
761 DEBUG(errs() << " replace range with " << LR);
762 interval.addRange(LR);
763 interval.addKill(ValNo, RedefIndex, false);
765 // If this redefinition is dead, we need to add a dummy unit live
766 // range covering the def slot.
768 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
771 errs() << " RESULT: ";
772 interval.print(errs(), tri_);
775 // Otherwise, this must be because of phi elimination. If this is the
776 // first redefinition of the vreg that we have seen, go back and change
777 // the live range in the PHI block to be a different value number.
778 if (interval.containsOneValue()) {
779 assert(vi.Kills.size() == 1 &&
780 "PHI elimination vreg should have one kill, the PHI itself!");
782 // Remove the old range that we now know has an incorrect number.
783 VNInfo *VNI = interval.getValNumInfo(0);
784 MachineInstr *Killer = vi.Kills[0];
785 unsigned Start = getMBBStartIdx(Killer->getParent());
786 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
788 errs() << " Removing [" << Start << "," << End << "] from: ";
789 interval.print(errs(), tri_);
792 interval.removeRange(Start, End);
793 assert(interval.ranges.size() == 1 &&
794 "newly discovered PHI interval has >1 ranges.");
795 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
796 interval.addKill(VNI, terminatorGaps[killMBB], true);
797 VNI->setHasPHIKill(true);
799 errs() << " RESULT: ";
800 interval.print(errs(), tri_);
803 // Replace the interval with one of a NEW value number. Note that this
804 // value number isn't actually defined by an instruction, weird huh? :)
805 LiveRange LR(Start, End,
806 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
807 LR.valno->setIsPHIDef(true);
808 DEBUG(errs() << " replace range with " << LR);
809 interval.addRange(LR);
810 interval.addKill(LR.valno, End, false);
812 errs() << " RESULT: ";
813 interval.print(errs(), tri_);
817 // In the case of PHI elimination, each variable definition is only
818 // live until the end of the block. We've already taken care of the
819 // rest of the live range.
820 unsigned defIndex = getDefIndex(MIIdx);
821 if (MO.isEarlyClobber())
822 defIndex = getUseIndex(MIIdx);
825 MachineInstr *CopyMI = NULL;
826 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
827 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
828 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
829 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
830 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
832 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
834 unsigned killIndex = getMBBEndIdx(mbb) + 1;
835 LiveRange LR(defIndex, killIndex, ValNo);
836 interval.addRange(LR);
837 interval.addKill(ValNo, terminatorGaps[mbb], true);
838 ValNo->setHasPHIKill(true);
839 DEBUG(errs() << " +" << LR);
843 DEBUG(errs() << '\n');
846 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
847 MachineBasicBlock::iterator mi,
850 LiveInterval &interval,
851 MachineInstr *CopyMI) {
// A physical register cannot be live across basic blocks, so its
// lifetime must end somewhere in its defining basic block.
855 errs() << "\t\tregister: ";
856 printRegName(interval.reg);
859 unsigned baseIndex = MIIdx;
860 unsigned start = getDefIndex(baseIndex);
861 // Earlyclobbers move back one.
862 if (MO.isEarlyClobber())
863 start = getUseIndex(MIIdx);
864 unsigned end = start;
866 // If it is not used after definition, it is considered dead at
867 // the instruction defining it. Hence its interval is:
868 // [defSlot(def), defSlot(def)+1)
870 DEBUG(errs() << " dead");
875 // If it is not dead on definition, it must be killed by a
876 // subsequent instruction. Hence its interval is:
877 // [defSlot(def), useSlot(kill)+1)
878 baseIndex += InstrSlots::NUM;
879 while (++mi != MBB->end()) {
880 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
881 getInstructionFromIndex(baseIndex) == 0)
882 baseIndex += InstrSlots::NUM;
883 if (mi->killsRegister(interval.reg, tri_)) {
884 DEBUG(errs() << " killed");
885 end = getUseIndex(baseIndex) + 1;
888 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
890 if (mi->isRegTiedToUseOperand(DefIdx)) {
891 // Two-address instruction.
892 end = getDefIndex(baseIndex);
893 if (mi->getOperand(DefIdx).isEarlyClobber())
894 end = getUseIndex(baseIndex);
896 // Another instruction redefines the register before it is ever read.
897 // Then the register is essentially dead at the instruction that defines
898 // it. Hence its interval is:
899 // [defSlot(def), defSlot(def)+1)
900 DEBUG(errs() << " dead");
907 baseIndex += InstrSlots::NUM;
// The only case we should have a dead physreg here without a killing
// instruction (where we know it's dead) is if it is live-in to the function
// and never used. Another possible case is that the implicit use of the
// physical register has been deleted by the two-address pass.
917 assert(start < end && "did not find end of interval?");
919 // Already exists? Extend old live interval.
920 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
921 bool Extend = OldLR != interval.end();
922 VNInfo *ValNo = Extend
923 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
924 if (MO.isEarlyClobber() && Extend)
925 ValNo->setHasRedefByEC(true);
926 LiveRange LR(start, end, ValNo);
927 interval.addRange(LR);
928 interval.addKill(LR.valno, end, false);
929 DEBUG(errs() << " +" << LR << '\n');
932 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
933 MachineBasicBlock::iterator MI,
937 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
938 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
939 getOrCreateInterval(MO.getReg()));
940 else if (allocatableRegs_[MO.getReg()]) {
941 MachineInstr *CopyMI = NULL;
942 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
943 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
944 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
945 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
946 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
948 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
949 getOrCreateInterval(MO.getReg()), CopyMI);
950 // Def of a register also defines its sub-registers.
951 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
952 // If MI also modifies the sub-register explicitly, avoid processing it
953 // more than once. Do not pass in TRI here so it checks for exact match.
954 if (!MI->modifiesRegister(*AS))
955 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
956 getOrCreateInterval(*AS), 0);
960 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
962 LiveInterval &interval, bool isAlias) {
964 errs() << "\t\tlivein register: ";
965 printRegName(interval.reg);
// Look for kills; if the register reaches a def before it's killed, then it
// shouldn't be considered live-in.
970 MachineBasicBlock::iterator mi = MBB->begin();
971 unsigned baseIndex = MIIdx;
972 unsigned start = baseIndex;
973 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
974 getInstructionFromIndex(baseIndex) == 0)
975 baseIndex += InstrSlots::NUM;
976 unsigned end = baseIndex;
977 bool SeenDefUse = false;
979 while (mi != MBB->end()) {
980 if (mi->killsRegister(interval.reg, tri_)) {
981 DEBUG(errs() << " killed");
982 end = getUseIndex(baseIndex) + 1;
985 } else if (mi->modifiesRegister(interval.reg, tri_)) {
986 // Another instruction redefines the register before it is ever read.
987 // Then the register is essentially dead at the instruction that defines
988 // it. Hence its interval is:
989 // [defSlot(def), defSlot(def)+1)
990 DEBUG(errs() << " dead");
991 end = getDefIndex(start) + 1;
996 baseIndex += InstrSlots::NUM;
998 if (mi != MBB->end()) {
999 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
1000 getInstructionFromIndex(baseIndex) == 0)
1001 baseIndex += InstrSlots::NUM;
1005 // Live-in register might not be used at all.
1008 DEBUG(errs() << " dead");
1009 end = getDefIndex(MIIdx) + 1;
1011 DEBUG(errs() << " live through");
1017 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
1018 vni->setIsPHIDef(true);
1019 LiveRange LR(start, end, vni);
1021 interval.addRange(LR);
1022 interval.addKill(LR.valno, end, false);
1023 DEBUG(errs() << " +" << LR << '\n');
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
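/// For example (illustrative indices only): a vreg defined by the
/// instruction at base index 8 and last read by the instruction at base
/// index 16 gets an interval of roughly [getDefIndex(8), getUseIndex(16)+1).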
1030 void LiveIntervals::computeIntervals() {
1031 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1032 << "********** Function: "
1033 << ((Value*)mf_->getFunction())->getName() << '\n');
1035 SmallVector<unsigned, 8> UndefUses;
1036 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1037 MBBI != E; ++MBBI) {
1038 MachineBasicBlock *MBB = MBBI;
1039 // Track the index of the current machine instr.
1040 unsigned MIIndex = getMBBStartIdx(MBB);
1041 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1043 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1045 // Create intervals for live-ins to this BB first.
1046 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1047 LE = MBB->livein_end(); LI != LE; ++LI) {
1048 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1049 // Multiple live-ins can alias the same register.
1050 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1051 if (!hasInterval(*AS))
1052 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1056 // Skip over empty initial indices.
1057 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1058 getInstructionFromIndex(MIIndex) == 0)
1059 MIIndex += InstrSlots::NUM;
1061 for (; MI != miEnd; ++MI) {
1062 DEBUG(errs() << MIIndex << "\t" << *MI);
1065 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1066 MachineOperand &MO = MI->getOperand(i);
1067 if (!MO.isReg() || !MO.getReg())
1070 // handle register defs - build intervals
1072 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1073 else if (MO.isUndef())
1074 UndefUses.push_back(MO.getReg());
1077 // Skip over the empty slots after each instruction.
1078 unsigned Slots = MI->getDesc().getNumDefs();
1081 MIIndex += InstrSlots::NUM * Slots;
1083 // Skip over empty indices.
1084 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1085 getInstructionFromIndex(MIIndex) == 0)
1086 MIIndex += InstrSlots::NUM;
// Create empty intervals for registers defined by implicit_def's (except
// for those implicit_defs that define values which are live-out of their
// blocks).
1093 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1094 unsigned UndefReg = UndefUses[i];
1095 (void)getOrCreateInterval(UndefReg);
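/// findLiveInMBBs - Given the index range [Start, End), collect in MBBs the
/// basic blocks whose start index falls inside the range. Returns true if
/// any block was found.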
1099 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
1100 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1101 std::vector<IdxMBBPair>::const_iterator I =
1102 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1104 bool ResVal = false;
1105 while (I != Idx2MBBMap.end()) {
1106 if (I->first >= End)
1108 MBBs.push_back(I->second);
1115 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
1116 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1117 std::vector<IdxMBBPair>::const_iterator I =
1118 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1120 bool ResVal = false;
1121 while (I != Idx2MBBMap.end()) {
1124 MachineBasicBlock *MBB = I->second;
1125 if (getMBBEndIdx(MBB) > End)
1127 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1128 SE = MBB->succ_end(); SI != SE; ++SI)
1129 MBBs.push_back(*SI);
1136 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1137 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1138 return new LiveInterval(reg, Weight);
1141 /// dupInterval - Duplicate a live interval. The caller is responsible for
1142 /// managing the allocated memory.
1143 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1144 LiveInterval *NewLI = createInterval(li->reg);
1145 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1149 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1150 /// copy field and returns the source register that defines it.
1151 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1152 if (!VNI->getCopy())
1155 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1156 // If it's extracting out of a physical register, return the sub-register.
1157 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1158 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1159 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1161 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1162 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1163 return VNI->getCopy()->getOperand(2).getReg();
1165 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1166 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1168 llvm_unreachable("Unrecognized copy instruction!");
1172 //===----------------------------------------------------------------------===//
1173 // Register allocator hooks.
1176 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1177 /// allow one) virtual register operand, then its uses are implicitly using
1178 /// the register. Returns the virtual register.
1179 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1180 MachineInstr *MI) const {
1182 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1183 MachineOperand &MO = MI->getOperand(i);
1184 if (!MO.isReg() || !MO.isUse())
1186 unsigned Reg = MO.getReg();
1187 if (Reg == 0 || Reg == li.reg)
1190 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1191 !allocatableRegs_[Reg])
// FIXME: For now, only remat MI with at most one register operand.
assert(RegOp == 0 &&
"Can't rematerialize instruction with multiple register operands!");
1196 RegOp = MO.getReg();
1204 /// isValNoAvailableAt - Return true if the val# of the specified interval
1205 /// which reaches the given instruction also reaches the specified use index.
1206 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1207 unsigned UseIdx) const {
1208 unsigned Index = getInstructionIndex(MI);
1209 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1210 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1211 return UI != li.end() && UI->valno == ValNo;
1214 /// isReMaterializable - Returns true if the definition MI of the specified
1215 /// val# of the specified interval is re-materializable.
1216 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1217 const VNInfo *ValNo, MachineInstr *MI,
1218 SmallVectorImpl<LiveInterval*> &SpillIs,
1223 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1227 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1228 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
// FIXME: Let target-specific isReallyTriviallyReMaterializable determine
// this, but remember this is not safe to fold into a two-address
// instruction.
// This is a load from a fixed stack slot. It can be rematerialized.
1235 // If the target-specific rules don't identify an instruction as
// being trivially rematerializable, use some target-independent
// rules.
1238 if (!MI->getDesc().isRematerializable() ||
1239 !tii_->isTriviallyReMaterializable(MI)) {
1240 if (!EnableAggressiveRemat)
1243 // If the instruction accesses memory but the memoperands have been lost,
1244 // we can't analyze it.
1245 const TargetInstrDesc &TID = MI->getDesc();
1246 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1249 // Avoid instructions obviously unsafe for remat.
1250 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1253 // If the instruction accesses memory and the memory could be non-constant,
1254 // assume the instruction is not rematerializable.
1255 for (std::list<MachineMemOperand>::const_iterator
1256 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1257 const MachineMemOperand &MMO = *I;
1258 if (MMO.isVolatile() || MMO.isStore())
1260 const Value *V = MMO.getValue();
1263 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1264 if (!PSV->isConstant(mf_->getFrameInfo()))
1266 } else if (!aa_->pointsToConstantMemory(V))
1270 // If any of the registers accessed are non-constant, conservatively assume
1271 // the instruction is not rematerializable.
1272 unsigned ImpUse = 0;
1273 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1274 const MachineOperand &MO = MI->getOperand(i);
1276 unsigned Reg = MO.getReg();
1279 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1282 // Only allow one def, and that in the first operand.
1283 if (MO.isDef() != (i == 0))
1286 // Only allow constant-valued registers.
1287 bool IsLiveIn = mri_->isLiveIn(Reg);
1288 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1289 E = mri_->def_end();
1291 // For the def, it should be the only def of that register.
1292 if (MO.isDef() && (next(I) != E || IsLiveIn))
// Only allow one other register use, as that's all the
// remat mechanisms currently support.
1298 if (Reg != li.reg) {
1301 else if (Reg != ImpUse)
1304 // For the use, there should be only one associated def.
1305 if (I != E && (next(I) != E || IsLiveIn))
1312 unsigned ImpUse = getReMatImplicitUse(li, MI);
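// If the def has a virtual register implicit use, the value of that
// register must still be available at every use of li covered by this
// val#; otherwise rematerializing the def at that point would read a stale
// or dead value.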
1314 const LiveInterval &ImpLi = getInterval(ImpUse);
1315 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1316 re = mri_->use_end(); ri != re; ++ri) {
1317 MachineInstr *UseMI = &*ri;
1318 unsigned UseIdx = getInstructionIndex(UseMI);
1319 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1321 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1325 // If a register operand of the re-materialized instruction is going to
1326 // be spilled next, then it's not legal to re-materialize this instruction.
1327 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1328 if (ImpUse == SpillIs[i]->reg)
1334 /// isReMaterializable - Returns true if the definition MI of the specified
1335 /// val# of the specified interval is re-materializable.
1336 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1337 const VNInfo *ValNo, MachineInstr *MI) {
1338 SmallVector<LiveInterval*, 4> Dummy1;
1340 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
1345 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1346 SmallVectorImpl<LiveInterval*> &SpillIs,
1349 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1351 const VNInfo *VNI = *i;
1352 if (VNI->isUnused())
1353 continue; // Dead val#.
1354 // Is the def for the val# rematerializable?
1355 if (!VNI->isDefAccurate())
1357 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1358 bool DefIsLoad = false;
1360 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1362 isLoad |= DefIsLoad;
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
1370 static bool FilterFoldedOps(MachineInstr *MI,
1371 SmallVector<unsigned, 2> &Ops,
1373 SmallVector<unsigned, 2> &FoldOps) {
1375 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1376 unsigned OpIdx = Ops[i];
1377 MachineOperand &MO = MI->getOperand(OpIdx);
1378 // FIXME: fold subreg use.
1382 MRInfo |= (unsigned)VirtRegMap::isMod;
1384 // Filter out two-address use operand(s).
1385 if (MI->isRegTiedToDefOperand(OpIdx)) {
1386 MRInfo = VirtRegMap::isModRef;
1389 MRInfo |= (unsigned)VirtRegMap::isRef;
1391 FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
1401 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1402 VirtRegMap &vrm, MachineInstr *DefMI,
1404 SmallVector<unsigned, 2> &Ops,
1405 bool isSS, int Slot, unsigned Reg) {
1406 // If it is an implicit def instruction, just delete it.
1407 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1408 RemoveMachineInstrFromMaps(MI);
1409 vrm.RemoveMachineInstrFromMaps(MI);
1410 MI->eraseFromParent();
1415 // Filter the list of operand indexes that are to be folded. Abort if
1416 // any operand will prevent folding.
1417 unsigned MRInfo = 0;
1418 SmallVector<unsigned, 2> FoldOps;
1419 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
// The only time it's safe to fold into a two-address instruction is when
// it's folding a reload or spill from / into a spill stack slot.
1424 if (DefMI && (MRInfo & VirtRegMap::isMod))
1427 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1428 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1430 // Remember this instruction uses the spill slot.
1431 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1433 // Attempt to fold the memory reference into the instruction. If
1434 // we can do this, we don't need to insert spill code.
1435 MachineBasicBlock &MBB = *MI->getParent();
1436 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1437 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1438 vrm.transferSpillPts(MI, fmi);
1439 vrm.transferRestorePts(MI, fmi);
1440 vrm.transferEmergencySpills(MI, fmi);
1442 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1443 mi2iMap_[fmi] = InstrIdx;
1444 MI = MBB.insert(MBB.erase(MI), fmi);
1451 /// canFoldMemoryOperand - Returns true if the specified load / store
1452 /// folding is possible.
1453 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1454 SmallVector<unsigned, 2> &Ops,
1456 // Filter the list of operand indexes that are to be folded. Abort if
1457 // any operand will prevent folding.
1458 unsigned MRInfo = 0;
1459 SmallVector<unsigned, 2> FoldOps;
1460 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1463 // It's only legal to remat for a use, not a def.
1464 if (ReMat && (MRInfo & VirtRegMap::isMod))
1467 return tii_->canFoldMemoryOperand(MI, FoldOps);
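/// intervalIsInOneMBB - Returns true if the given live interval is entirely
/// contained within a single basic block.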
1470 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1471 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1472 for (LiveInterval::Ranges::const_iterator
1473 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1474 std::vector<IdxMBBPair>::const_iterator II =
1475 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1476 if (II == Idx2MBBMap.end())
if (I->end > II->first) // crossing an MBB.
1480 MBBs.insert(II->second);
1481 if (MBBs.size() > 1)
1487 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1488 /// interval on to-be re-materialized operands of MI) with new register.
1489 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1490 MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
1495 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1496 MachineOperand &MO = MI->getOperand(i);
1499 unsigned Reg = MO.getReg();
1500 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1502 if (!vrm.isReMaterialized(Reg))
1504 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1505 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1507 UseMO->setReg(NewVReg);
1511 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1512 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1513 bool LiveIntervals::
1514 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1515 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1516 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1517 unsigned Slot, int LdSlot,
1518 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1520 const TargetRegisterClass* rc,
1521 SmallVector<int, 4> &ReMatIds,
1522 const MachineLoopInfo *loopInfo,
1523 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1524 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1525 std::vector<LiveInterval*> &NewLIs) {
1526 bool CanFold = false;
1528 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1529 MachineOperand& mop = MI->getOperand(i);
1532 unsigned Reg = mop.getReg();
1533 unsigned RegI = Reg;
1534 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1539 bool TryFold = !DefIsReMat;
1540 bool FoldSS = true; // Default behavior unless it's a remat.
1541 int FoldSlot = Slot;
1543 // If this is the rematerializable definition MI itself and
1544 // all of its uses are rematerialized, simply delete it.
1545 if (MI == ReMatOrigDefMI && CanDelete) {
DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1548 RemoveMachineInstrFromMaps(MI);
1549 vrm.RemoveMachineInstrFromMaps(MI);
1550 MI->eraseFromParent();
1554 // If def for this use can't be rematerialized, then try folding.
1555 // If def is rematerializable and it's a load, also try folding.
1556 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1558 // Try fold loads (from stack slot, constant pool, etc.) into uses.
// Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons:
1568 // 1. If the instr reads the same spilled vreg multiple times, we
1569 // want to reuse the NewVReg.
1570 // 2. If the instr is a two-addr instruction, we are required to
1571 // keep the src/dst regs pinned.
1573 // Keep track of whether we replace a use and/or def so that we can
1574 // create the spill interval with the appropriate range.
1576 HasUse = mop.isUse();
1577 HasDef = mop.isDef();
1578 SmallVector<unsigned, 2> Ops;
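// Ops collects the indices of the operands in MI that refer to li.reg, so
// they can all be rewritten to NewVReg and be considered together when
// deciding whether the reload / spill can be folded into MI.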
1580 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1581 const MachineOperand &MOj = MI->getOperand(j);
1584 unsigned RegJ = MOj.getReg();
1585 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1589 if (!MOj.isUndef()) {
1590 HasUse |= MOj.isUse();
1591 HasDef |= MOj.isDef();
1596 // Create a new virtual register for the spill interval.
1597 // Create the new register now so we can map the fold instruction
1598 // to the new register so when it is unfolded we get the correct
1600 bool CreatedNewVReg = false;
1602 NewVReg = mri_->createVirtualRegister(rc);
1604 CreatedNewVReg = true;
1610 // Do not fold load / store here if we are splitting. We'll find an
1611 // optimal point to insert a load / store later.
1613 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1614 Ops, FoldSS, FoldSlot, NewVReg)) {
1615 // Folding the load/store can completely change the instruction in
1616 // unpredictable ways, rescan it from the beginning.
1619 // We need to give the new vreg the same stack slot as the
1620 // spilled interval.
1621 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1627 if (isNotInMIMap(MI))
1629 goto RestartInstruction;
1632 // We'll try to fold it later if it's profitable.
1633 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1637 mop.setReg(NewVReg);
1638 if (mop.isImplicit())
1639 rewriteImplicitOps(li, MI, NewVReg, vrm);
1641 // Reuse NewVReg for other reads.
1642 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1643 MachineOperand &mopj = MI->getOperand(Ops[j]);
1644 mopj.setReg(NewVReg);
1645 if (mopj.isImplicit())
1646 rewriteImplicitOps(li, MI, NewVReg, vrm);
1649 if (CreatedNewVReg) {
1651 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1652 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1653 // Each valnum may have its own remat id.
1654 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1656 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1658 if (!CanDelete || (HasUse && HasDef)) {
// If this is a two-addr instruction then its use operands are
// rematerializable but its def is not. It should be assigned a
// stack slot.
1662 vrm.assignVirt2StackSlot(NewVReg, Slot);
1665 vrm.assignVirt2StackSlot(NewVReg, Slot);
1667 } else if (HasUse && HasDef &&
1668 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1669 // If this interval hasn't been assigned a stack slot (because earlier
1670 // def is a deleted remat def), do it now.
1671 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1672 vrm.assignVirt2StackSlot(NewVReg, Slot);
1675 // Re-matting an instruction with virtual register use. Add the
1676 // register as an implicit use on the use MI.
1677 if (DefIsReMat && ImpUse)
1678 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1680 // Create a new register interval for this spill / remat.
1681 LiveInterval &nI = getOrCreateInterval(NewVReg);
1682 if (CreatedNewVReg) {
1683 NewLIs.push_back(&nI);
1684 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1686 vrm.setIsSplitFromReg(NewVReg, li.reg);
1690 if (CreatedNewVReg) {
1691 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1692 nI.getNextValue(0, 0, false, VNInfoAllocator));
1693 DEBUG(errs() << " +" << LR);
1696 // Extend the split live interval to this def / use.
1697 unsigned End = getUseIndex(index)+1;
1698 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1699 nI.getValNumInfo(nI.getNumValNums()-1));
1700 DEBUG(errs() << " +" << LR);
1705 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1706 nI.getNextValue(0, 0, false, VNInfoAllocator));
1707 DEBUG(errs() << " +" << LR);
1712 errs() << "\t\t\t\tAdded new interval: ";
1713 nI.print(errs(), tri_);
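/// anyKillInMBBAfterIdx - Returns true if the given val# of li has a
/// non-PHI kill inside MBB at an index greater than Idx.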
1719 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1721 MachineBasicBlock *MBB, unsigned Idx) const {
1722 unsigned End = getMBBEndIdx(MBB);
1723 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1724 if (VNI->kills[j].isPHIKill)
1727 unsigned KillIdx = VNI->kills[j].killIdx;
1728 if (KillIdx > Idx && KillIdx < End)
1734 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1735 /// during spilling.
1737 struct RewriteInfo {
1742 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1743 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1746 struct RewriteInfoCompare {
1747 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1748 return LHS.Index < RHS.Index;
1753 void LiveIntervals::
1754 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1755 LiveInterval::Ranges::const_iterator &I,
1756 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1757 unsigned Slot, int LdSlot,
1758 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1760 const TargetRegisterClass* rc,
1761 SmallVector<int, 4> &ReMatIds,
1762 const MachineLoopInfo *loopInfo,
1763 BitVector &SpillMBBs,
1764 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1765 BitVector &RestoreMBBs,
1766 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1767 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1768 std::vector<LiveInterval*> &NewLIs) {
1769 bool AllCanFold = true;
1770 unsigned NewVReg = 0;
1771 unsigned start = getBaseIndex(I->start);
1772 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1774 // First collect all the def / use in this live range that will be rewritten.
1775 // Make sure they are sorted according to instruction index.
1776 std::vector<RewriteInfo> RewriteMIs;
1777 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1778 re = mri_->reg_end(); ri != re; ) {
1779 MachineInstr *MI = &*ri;
1780 MachineOperand &O = ri.getOperand();
1782 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1783 unsigned index = getInstructionIndex(MI);
1784 if (index < start || index >= end)
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons, e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1798 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1800 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1802 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1803 // Now rewrite the defs and uses.
1804 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1805 RewriteInfo &rwi = RewriteMIs[i];
1807 unsigned index = rwi.Index;
1808 bool MIHasUse = rwi.HasUse;
1809 bool MIHasDef = rwi.HasDef;
1810 MachineInstr *MI = rwi.MI;
// If MI defs and/or uses the same register multiple times, then there
// are multiple entries.
1813 unsigned NumUses = MIHasUse;
1814 while (i != e && RewriteMIs[i].MI == MI) {
1815 assert(RewriteMIs[i].Index == index);
1816 bool isUse = RewriteMIs[i].HasUse;
1817 if (isUse) ++NumUses;
1819 MIHasDef |= RewriteMIs[i].HasDef;
1822 MachineBasicBlock *MBB = MI->getParent();
1824 if (ImpUse && MI != ReMatDefMI) {
1825 // Re-matting an instruction with virtual register use. Update the
1826 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1828 LiveInterval &ImpLi = getInterval(ImpUse);
1829 ImpLi.weight = HUGE_VALF;
1832 unsigned MBBId = MBB->getNumber();
1833 unsigned ThisVReg = 0;
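// MBBVRegsMap remembers the most recent new vreg created for li in this MBB;
// reusing it keeps all references within one block on a single split interval.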
1835 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1836 if (NVI != MBBVRegsMap.end()) {
1837 ThisVReg = NVI->second;
1844 // It's better to start a new interval to avoid artificially
1845 // extending the new interval.
1846 if (MIHasDef && !MIHasUse) {
1847 MBBVRegsMap.erase(MBB->getNumber());
1853 bool IsNew = ThisVReg == 0;
1855 // This ends the previous live interval. If all of its def / use
1856 // can be folded, give it a low spill weight.
1857 if (NewVReg && TrySplit && AllCanFold) {
1858 LiveInterval &nI = getOrCreateInterval(NewVReg);
1865 bool HasDef = false;
1866 bool HasUse = false;
1867 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1868 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1869 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1870 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1871 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1872 if (!HasDef && !HasUse)
1875 AllCanFold &= CanFold;
1877 // Update weight of spill interval.
1878 LiveInterval &nI = getOrCreateInterval(NewVReg);
1880 // The spill weight is now infinity as it cannot be spilled again.
1881 nI.weight = HUGE_VALF;
1885 // Keep track of the last def and first use in each MBB.
1887 if (MI != ReMatOrigDefMI || !CanDelete) {
1888 bool HasKill = false;
1890 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1892 // If this is two-address code, then this index starts a new VNInfo.
1893 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1895 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1897 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1898 SpillIdxes.find(MBBId);
1900 if (SII == SpillIdxes.end()) {
1901 std::vector<SRInfo> S;
1902 S.push_back(SRInfo(index, NewVReg, true));
1903 SpillIdxes.insert(std::make_pair(MBBId, S));
1904 } else if (SII->second.back().vreg != NewVReg) {
1905 SII->second.push_back(SRInfo(index, NewVReg, true));
1906 } else if ((int)index > SII->second.back().index) {
1907 // If there is an earlier def and this is a two-address
1908 // instruction, then it's not possible to fold the store (which
1909 // would also fold the load).
1910 SRInfo &Info = SII->second.back();
1912 Info.canFold = !HasUse;
1914 SpillMBBs.set(MBBId);
1915 } else if (SII != SpillIdxes.end() &&
1916 SII->second.back().vreg == NewVReg &&
1917 (int)index > SII->second.back().index) {
1918 // There is an earlier def that's not killed (must be two-address).
1919 // The spill is no longer needed.
1920 SII->second.pop_back();
1921 if (SII->second.empty()) {
1922 SpillIdxes.erase(MBBId);
1923 SpillMBBs.reset(MBBId);
1930 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1931 SpillIdxes.find(MBBId);
1932 if (SII != SpillIdxes.end() &&
1933 SII->second.back().vreg == NewVReg &&
1934 (int)index > SII->second.back().index)
1935 // There are use(s) following the last def; it's not safe to fold the spill.
1936 SII->second.back().canFold = false;
1937 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1938 RestoreIdxes.find(MBBId);
1939 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1940 // If we are splitting live intervals, only fold if it's the first
1941 // use and there isn't another use later in the MBB.
1942 RII->second.back().canFold = false;
1944 // Only need a reload if there isn't an earlier def / use.
1945 if (RII == RestoreIdxes.end()) {
1946 std::vector<SRInfo> Infos;
1947 Infos.push_back(SRInfo(index, NewVReg, true));
1948 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1950 RII->second.push_back(SRInfo(index, NewVReg, true));
1952 RestoreMBBs.set(MBBId);
1956 // Update spill weight.
1957 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1958 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
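// (getSpillWeight scales the def/use count by loop depth, so references in
// deeper loops make the new interval correspondingly more expensive to spill.)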
1961 if (NewVReg && TrySplit && AllCanFold) {
1962 // If all of its def / use can be folded, give it a low spill weight.
1963 LiveInterval &nI = getOrCreateInterval(NewVReg);
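/// alsoFoldARestore - Return true if a restore has been recorded for the given
/// (MBB, instruction index, vreg) triple and it is still marked as foldable.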
1968 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1969 BitVector &RestoreMBBs,
1970 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1971 if (!RestoreMBBs[Id])
1973 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1974 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1975 if (Restores[i].index == index &&
1976 Restores[i].vreg == vr &&
1977 Restores[i].canFold)
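/// eraseRestoreInfo - Invalidate the restore recorded for the given
/// (MBB, instruction index, vreg) triple by resetting its index.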
1982 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1983 BitVector &RestoreMBBs,
1984 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1985 if (!RestoreMBBs[Id])
1987 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1988 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1989 if (Restores[i].index == index && Restores[i].vreg == vr)
1990 Restores[i].index = -1;
1993 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1994 /// spilled and create empty intervals for their uses.
1996 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1997 const TargetRegisterClass* rc,
1998 std::vector<LiveInterval*> &NewLIs) {
1999 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2000 re = mri_->reg_end(); ri != re; ) {
2001 MachineOperand &O = ri.getOperand();
2002 MachineInstr *MI = &*ri;
2005 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2006 "Register def was not rewritten?");
2007 RemoveMachineInstrFromMaps(MI);
2008 vrm.RemoveMachineInstrFromMaps(MI);
2009 MI->eraseFromParent();
2011 // This must be a use of an implicit_def so it's not part of the live
2012 // interval. Create a new empty live interval for it.
2013 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2014 unsigned NewVReg = mri_->createVirtualRegister(rc);
2016 vrm.setIsImplicitlyDefined(NewVReg);
2017 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2018 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2019 MachineOperand &MO = MI->getOperand(i);
2020 if (MO.isReg() && MO.getReg() == li.reg) {
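// addIntervalsForSpillsFast - Simpler spill path taken when EnableFastSpilling
// is set: assign one stack slot to the interval, try to fold the memory
// operand directly into each referencing instruction, and otherwise rewrite the
// reference onto a fresh vreg with a tiny interval plus an explicit restore or
// spill point.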
2029 std::vector<LiveInterval*> LiveIntervals::
2030 addIntervalsForSpillsFast(const LiveInterval &li,
2031 const MachineLoopInfo *loopInfo,
2033 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2035 std::vector<LiveInterval*> added;
2037 assert(li.weight != HUGE_VALF &&
2038 "attempt to spill already spilled interval!");
2041 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2046 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2048 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2049 while (RI != mri_->reg_end()) {
2050 MachineInstr* MI = &*RI;
2052 SmallVector<unsigned, 2> Indices;
2053 bool HasUse = false;
2054 bool HasDef = false;
2056 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2057 MachineOperand& mop = MI->getOperand(i);
2058 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2060 HasUse |= MI->getOperand(i).isUse();
2061 HasDef |= MI->getOperand(i).isDef();
2063 Indices.push_back(i);
2066 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2067 Indices, true, slot, li.reg)) {
2068 unsigned NewVReg = mri_->createVirtualRegister(rc);
2070 vrm.assignVirt2StackSlot(NewVReg, slot);
2072 // create a new register for this spill
2073 LiveInterval &nI = getOrCreateInterval(NewVReg);
2075 // the spill weight is now infinity as it
2076 // cannot be spilled again
2077 nI.weight = HUGE_VALF;
2079 // Rewrite register operands to use the new vreg.
2080 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2081 E = Indices.end(); I != E; ++I) {
2082 MI->getOperand(*I).setReg(NewVReg);
2084 if (MI->getOperand(*I).isUse())
2085 MI->getOperand(*I).setIsKill(true);
2088 // Fill in the new live interval.
2089 unsigned index = getInstructionIndex(MI);
2091 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2092 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2093 DEBUG(errs() << " +" << LR);
2095 vrm.addRestorePoint(NewVReg, MI);
2098 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2099 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2100 DEBUG(errs() << " +" << LR);
2102 vrm.addSpillPoint(NewVReg, true, MI);
2105 added.push_back(&nI);
2108 errs() << "\t\t\t\tadded new interval: ";
2115 RI = mri_->reg_begin(li.reg);
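// addIntervalsForSpills - Create new live intervals for the defs and uses of
// the spilled interval: decide per-val# whether the def is rematerializable,
// allocate a stack slot if one is still needed, rewrite the instructions, and
// finally place the folded / unfolded spill and restore points.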
2121 std::vector<LiveInterval*> LiveIntervals::
2122 addIntervalsForSpills(const LiveInterval &li,
2123 SmallVectorImpl<LiveInterval*> &SpillIs,
2124 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2126 if (EnableFastSpilling)
2127 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2129 assert(li.weight != HUGE_VALF &&
2130 "attempt to spill already spilled interval!");
2133 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2134 li.print(errs(), tri_);
2138 // Each bit specifies whether a spill is required in the MBB.
2139 BitVector SpillMBBs(mf_->getNumBlockIDs());
2140 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2141 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2142 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2143 DenseMap<unsigned,unsigned> MBBVRegsMap;
2144 std::vector<LiveInterval*> NewLIs;
2145 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2147 unsigned NumValNums = li.getNumValNums();
2148 SmallVector<MachineInstr*, 4> ReMatDefs;
2149 ReMatDefs.resize(NumValNums, NULL);
2150 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2151 ReMatOrigDefs.resize(NumValNums, NULL);
2152 SmallVector<int, 4> ReMatIds;
2153 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2154 BitVector ReMatDelete(NumValNums);
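// Per-val# rematerialization bookkeeping: ReMatOrigDefs holds the original
// defining instruction, ReMatDefs a private clone that is safe to re-emit,
// ReMatIds whether a remat id has been registered yet (initialized to
// MAX_STACK_SLOT), and ReMatDelete marks val#s whose original def can go away
// once all uses have been rewritten.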
2155 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2157 // Spilling a split live interval. It cannot be split any further. Also,
2158 // it's guaranteed to be a single val# / range interval.
2159 if (vrm.getPreSplitReg(li.reg)) {
2160 vrm.setIsSplitFromReg(li.reg, 0);
2161 // Unset the split kill marker on the last use.
2162 unsigned KillIdx = vrm.getKillPoint(li.reg);
2164 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2165 assert(KillMI && "Last use disappeared?");
2166 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2167 assert(KillOp != -1 && "Last use disappeared?");
2168 KillMI->getOperand(KillOp).setIsKill(false);
2170 vrm.removeKillPoint(li.reg);
2171 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2172 Slot = vrm.getStackSlot(li.reg);
2173 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2174 MachineInstr *ReMatDefMI = DefIsReMat ?
2175 vrm.getReMaterializedMI(li.reg) : NULL;
2177 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2178 bool isLoad = isLoadSS ||
2179 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2180 bool IsFirstRange = true;
2181 for (LiveInterval::Ranges::const_iterator
2182 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2183 // If this is a split live interval with multiple ranges, it means there
2184 // are two-address instructions that re-defined the value. Only the
2185 // first def can be rematerialized!
2187 // Note ReMatOrigDefMI has already been deleted.
2188 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2189 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2190 false, vrm, rc, ReMatIds, loopInfo,
2191 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2192 MBBVRegsMap, NewLIs);
2194 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2195 Slot, 0, false, false, false,
2196 false, vrm, rc, ReMatIds, loopInfo,
2197 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2198 MBBVRegsMap, NewLIs);
2200 IsFirstRange = false;
2203 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2207 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2208 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2212 bool NeedStackSlot = false;
2213 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2215 const VNInfo *VNI = *i;
2216 unsigned VN = VNI->id;
2217 if (VNI->isUnused())
2218 continue; // Dead val#.
2219 // Is the def for the val# rematerializable?
2220 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2221 ? getInstructionFromIndex(VNI->def) : 0;
2223 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2224 // Remember how to remat the def of this val#.
2225 ReMatOrigDefs[VN] = ReMatDefMI;
2226 // Original def may be modified so we have to make a copy here.
2227 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2228 ClonedMIs.push_back(Clone);
2229 ReMatDefs[VN] = Clone;
2231 bool CanDelete = true;
2232 if (VNI->hasPHIKill()) {
2233 // A kill is a phi node; not all of its uses can be rematerialized.
2234 // It must not be deleted.
2236 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2238 NeedStackSlot = true;
2241 ReMatDelete.set(VN);
2243 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2245 NeedStackSlot = true;
2249 // One stack slot per live interval.
2250 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2251 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2252 Slot = vrm.assignVirt2StackSlot(li.reg);
2254 // This case only occurs when the prealloc splitter has already assigned
2255 // a stack slot to this vreg.
2257 Slot = vrm.getStackSlot(li.reg);
2260 // Create new intervals and rewrite defs and uses.
2261 for (LiveInterval::Ranges::const_iterator
2262 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2263 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2264 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2265 bool DefIsReMat = ReMatDefMI != NULL;
2266 bool CanDelete = ReMatDelete[I->valno->id];
2268 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2269 bool isLoad = isLoadSS ||
2270 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2271 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2272 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2273 CanDelete, vrm, rc, ReMatIds, loopInfo,
2274 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2275 MBBVRegsMap, NewLIs);
2278 // Insert spills / restores if we are splitting.
2280 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2284 SmallPtrSet<LiveInterval*, 4> AddedKill;
2285 SmallVector<unsigned, 2> Ops;
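// Walk the blocks that recorded spill points. For each point, try to fold the
// store into the defining instruction; if that is not possible, tell the
// VirtRegMap to insert an explicit spill (unless the def is dead).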
2286 if (NeedStackSlot) {
2287 int Id = SpillMBBs.find_first();
2289 std::vector<SRInfo> &spills = SpillIdxes[Id];
2290 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2291 int index = spills[i].index;
2292 unsigned VReg = spills[i].vreg;
2293 LiveInterval &nI = getOrCreateInterval(VReg);
2294 bool isReMat = vrm.isReMaterialized(VReg);
2295 MachineInstr *MI = getInstructionFromIndex(index);
2296 bool CanFold = false;
2297 bool FoundUse = false;
2299 if (spills[i].canFold) {
2301 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2302 MachineOperand &MO = MI->getOperand(j);
2303 if (!MO.isReg() || MO.getReg() != VReg)
2310 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2311 RestoreMBBs, RestoreIdxes))) {
2312 // MI has two-address uses of the same register. If the use
2313 // isn't the first and only use in the BB, then we can't fold
2314 // it. FIXME: Move this to rewriteInstructionsForSpills.
2321 // Fold the store into the def if possible.
2322 bool Folded = false;
2323 if (CanFold && !Ops.empty()) {
2324 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2327 // Also folded uses; do not issue a load.
2328 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2329 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2331 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2335 // Otherwise tell the spiller to issue a spill.
2337 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2338 bool isKill = LR->end == getStoreIndex(index);
2339 if (!MI->registerDefIsDead(nI.reg))
2340 // No need to spill a dead def.
2341 vrm.addSpillPoint(VReg, isKill, MI);
2343 AddedKill.insert(&nI);
2346 Id = SpillMBBs.find_next(Id);
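// Now walk the recorded restore points. Try to fold the reload (or the
// rematerialized def, when it is itself a load) into the using instruction;
// otherwise record a restore point so the spiller emits a load / remat.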
2350 int Id = RestoreMBBs.find_first();
2352 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2353 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2354 int index = restores[i].index;
2357 unsigned VReg = restores[i].vreg;
2358 LiveInterval &nI = getOrCreateInterval(VReg);
2359 bool isReMat = vrm.isReMaterialized(VReg);
2360 MachineInstr *MI = getInstructionFromIndex(index);
2361 bool CanFold = false;
2363 if (restores[i].canFold) {
2365 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2366 MachineOperand &MO = MI->getOperand(j);
2367 if (!MO.isReg() || MO.getReg() != VReg)
2371 // If this restore were to be folded, it would have been folded already.
2380 // Fold the load into the use if possible.
2381 bool Folded = false;
2382 if (CanFold && !Ops.empty()) {
2384 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2386 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2388 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2389 // If the rematerializable def is a load, also try to fold it.
2390 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2391 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2392 Ops, isLoadSS, LdSlot, VReg);
2394 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2396 // Re-matting an instruction with virtual register use. Add the
2397 // register as an implicit use on the use MI and update the register
2398 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2400 LiveInterval &ImpLi = getInterval(ImpUse);
2401 ImpLi.weight = HUGE_VALF;
2402 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2407 // If folding is not possible / failed, then tell the spiller to issue a
2408 // load / rematerialization for us.
2410 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2412 vrm.addRestorePoint(VReg, MI);
2414 Id = RestoreMBBs.find_next(Id);
2417 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2419 std::vector<LiveInterval*> RetNewLIs;
2420 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2421 LiveInterval *LI = NewLIs[i];
2423 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2424 if (!AddedKill.count(LI)) {
2425 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2426 unsigned LastUseIdx = getBaseIndex(LR->end);
2427 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2428 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2429 assert(UseIdx != -1);
2430 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2431 LastUse->getOperand(UseIdx).setIsKill();
2432 vrm.addKillPoint(LI->reg, LastUseIdx);
2435 RetNewLIs.push_back(LI);
2439 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
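// Illustrative only (not part of this file): a register allocator typically
// drives the spill machinery above roughly as follows, where `lis`, `cur`,
// `loopInfo` and `vrm` are hypothetical handles to this analysis, the interval
// chosen for spilling, the loop info, and the virtual register map:
//
//   SmallVector<LiveInterval*, 8> spillIs;  // other intervals spilled along
//   std::vector<LiveInterval*> added =
//       lis->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm);
//   // The returned intervals are then handed back to the allocator's queue.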
2443 /// hasAllocatableSuperReg - Return true if the specified physical register has
2444 /// any super register that's allocatable.
2445 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2446 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2447 if (allocatableRegs_[*AS] && hasInterval(*AS))
2452 /// getRepresentativeReg - Find the largest super register of the specified
2453 /// physical register.
2454 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2455 // Find the largest super-register that is allocatable.
2456 unsigned BestReg = Reg;
2457 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2458 unsigned SuperReg = *AS;
2459 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2467 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2468 /// specified interval that conflict with the specified physical register.
2469 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2470 unsigned PhysReg) const {
2471 unsigned NumConflicts = 0;
2472 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2473 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2474 E = mri_->reg_end(); I != E; ++I) {
2475 MachineOperand &O = I.getOperand();
2476 MachineInstr *MI = O.getParent();
2477 unsigned Index = getInstructionIndex(MI);
2478 if (pli.liveAt(Index))
2481 return NumConflicts;
2484 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2485 /// around all defs and uses of the specified interval. Return true if it
2486 /// was able to cut its interval.
2487 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2488 unsigned PhysReg, VirtRegMap &vrm) {
2489 unsigned SpillReg = getRepresentativeReg(PhysReg);
2491 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2492 // If there are registers that alias PhysReg but are not sub-registers
2493 // of the chosen representative super register, assert, since we can't
2494 // handle that case yet.
2495 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2496 tri_->isSuperRegister(*AS, SpillReg));
2499 LiveInterval &pli = getInterval(SpillReg);
2500 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2501 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2502 E = mri_->reg_end(); I != E; ++I) {
2503 MachineOperand &O = I.getOperand();
2504 MachineInstr *MI = O.getParent();
2505 if (SeenMIs.count(MI))
2508 unsigned Index = getInstructionIndex(MI);
2509 if (pli.liveAt(Index)) {
2510 vrm.addEmergencySpill(SpillReg, MI);
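// Record an emergency spill: the rewriter will save and restore SpillReg
// around this instruction, which lets us carve this point out of the physical
// register's live interval below.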
2511 unsigned StartIdx = getLoadIndex(Index);
2512 unsigned EndIdx = getStoreIndex(Index)+1;
2513 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2514 pli.removeRange(StartIdx, EndIdx);
2518 raw_string_ostream Msg(msg);
2519 Msg << "Ran out of registers during register allocation!";
2520 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2521 Msg << "\nPlease check your inline asm statement for invalid "
2522 << "constraints:\n";
2523 MI->print(Msg, tm_);
2525 llvm_report_error(Msg.str());
2527 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2528 if (!hasInterval(*AS))
2530 LiveInterval &spli = getInterval(*AS);
2531 if (spli.liveAt(Index))
2532 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
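/// addLiveRangeToEndOfBlock - Given a register defined by startInst, create a
/// new value number and a live range covering from that def to the end of the
/// defining basic block, and add it to the register's interval.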
2539 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2540 MachineInstr* startInst) {
2541 LiveInterval& Interval = getOrCreateInterval(reg);
2542 VNInfo* VN = Interval.getNextValue(
2543 getInstructionIndex(startInst) + InstrSlots::DEF,
2544 startInst, true, getVNInfoAllocator());
2545 VN->setHasPHIKill(true);
2546 VN->kills.push_back(
2547 VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2548 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2549 getMBBEndIdx(startInst->getParent()) + 1, VN);
2550 Interval.addRange(LR);