//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
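// Registering the pass (below) makes it available to the pass manager and
// requestable by name, e.g. via "-liveintervals".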
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;

  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;

  return false;
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
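///
/// For example (a sketch; opcodes and register numbers are hypothetical):
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024
/// becomes
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = IMPLICIT_DEF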
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!ImpDefRegs.count(Reg))
          continue;

        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
        }

        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {

      // The only implicit_def which we want to keep are those that are live
      // out of their blocks.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
           UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;

        MachineBasicBlock *RMBB = RMI->getParent();

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
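
  // Each instruction is allotted InstrSlots::NUM consecutive indices,
  // conceptually subdivided into LOAD, USE, DEF and STORE slots (see
  // getLoadIndex / getUseIndex / getDefIndex / getStoreIndex). E.g. an
  // instruction at base index 4 has load slot 4, use slot 5, def slot 6
  // and store slot 7.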
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point the
        // kill indices of values killed by PHI nodes.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point the
      // kill indices of values killed by PHI nodes.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
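            // PHI kills do not point at a real instruction; they are
            // remapped to the terminator gap reserved in the killing block.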
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);

              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // * scale MBB begin and end points
  // * scale all ranges.
  // * Update VNI structures.
  // * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getNameStr() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
         mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
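    // In that common case the resulting range is simply
    // [defSlot(def), useSlot(kill)+1) within the defining block.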
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
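      // E.g. (hypothetical vreg numbers) for the tied-operand instruction
      //   %reg1024<def> = ADD %reg1024<use,kill>, 1
      // the single value must become two: one for the range reaching the
      // redefinition, and one defined by the redefining instruction.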
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // dead instruction where we know it's dead is if it is live-in to the
  // function and never used. Another possible case is that the implicit
  // use of the physical register has been deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
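
/// handleLiveInRegister - Create an interval for a register that is live in
/// to MBB; isAlias is set when the register is only an alias of a live-in.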
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx) + 1;
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
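  // Physical register intervals get an effectively infinite weight so that
  // the register allocator never tries to spill them.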
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//===----------------------------------------------------------------------===//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (Reg != li.reg) {
        if (!ImpUse)
          ImpUse = Reg;
        else if (Reg != ImpUse)
          return false;
      }
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
         re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
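
/// intervalIsInOneMBB - Returns true if the given live interval is entirely
/// contained within a single basic block.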
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // register.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
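      });
  }
  return CanFold;
}

/// anyKillInMBBAfterIdx - Return true if the specified VNI has a non-PHI
/// kill inside MBB after the given index.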
1717 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1719 MachineBasicBlock *MBB, unsigned Idx) const {
1720 unsigned End = getMBBEndIdx(MBB);
1721 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1722 if (VNI->kills[j].isPHIKill)
1725 unsigned KillIdx = VNI->kills[j].killIdx;
1726 if (KillIdx > Idx && KillIdx < End)
1732 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1733 /// during spilling.
1735 struct RewriteInfo {
1740 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1741 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1744 struct RewriteInfoCompare {
1745 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1746 return LHS.Index < RHS.Index;
1751 void LiveIntervals::
1752 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1753 LiveInterval::Ranges::const_iterator &I,
1754 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1755 unsigned Slot, int LdSlot,
1756 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1758 const TargetRegisterClass* rc,
1759 SmallVector<int, 4> &ReMatIds,
1760 const MachineLoopInfo *loopInfo,
1761 BitVector &SpillMBBs,
1762 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1763 BitVector &RestoreMBBs,
1764 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1765 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1766 std::vector<LiveInterval*> &NewLIs) {
1767 bool AllCanFold = true;
1768 unsigned NewVReg = 0;
1769 unsigned start = getBaseIndex(I->start);
1770 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1772 // First collect all the def / use in this live range that will be rewritten.
1773 // Make sure they are sorted according to instruction index.
1774 std::vector<RewriteInfo> RewriteMIs;
1775 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1776 re = mri_->reg_end(); ri != re; ) {
1777 MachineInstr *MI = &*ri;
1778 MachineOperand &O = ri.getOperand();
1780 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1781 unsigned index = getInstructionIndex(MI);
1782 if (index < start || index >= end)
1786 // Must be defined by an implicit def. It should not be spilled. Note,
1787 // this is for correctness reason. e.g.
1788 // 8 %reg1024<def> = IMPLICIT_DEF
1789 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1790 // The live range [12, 14) are not part of the r1024 live interval since
1791 // it's defined by an implicit def. It will not conflicts with live
1792 // interval of r1025. Now suppose both registers are spilled, you can
1793 // easily see a situation where both registers are reloaded before
1794 // the INSERT_SUBREG and both target registers that would overlap.
1796 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1798 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }
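
    // When splitting, reuse the vreg already assigned to this block where
    // possible so that all rewrites in one MBB share a single new interval.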
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
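
    // A use records at most one restore per block: only the first use of the
    // new vreg in an MBB needs a reload; later uses read the same value.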
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
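
/// alsoFoldARestore - Return true if block Id has a foldable restore of
/// virtual register vr recorded at the given instruction index.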
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
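
/// eraseRestoreInfo - Invalidate the restore of virtual register vr recorded
/// at the given index in block Id by setting its index to -1; the restore
/// insertion loop below skips such dead entries.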
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
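
/// addIntervalsForSpills - The main spill driver: analyze which val#s are
/// rematerializable, decide whether the interval can be split at basic block
/// boundaries, rewrite all defs / uses, and finally act on the fold / spill /
/// restore decisions collected in SpillIdxes and RestoreIdxes.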
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
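
  // First pass: for each val#, see if its def can be rematerialized instead
  // of reloaded from a stack slot; only values whose uses cannot all be
  // rematerialized force allocation of a stack slot.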
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
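
  // At this point SpillIdxes holds the last def and RestoreIdxes the first
  // use of each new vreg in every block that needs one. Try to fold each
  // spill / restore into the instruction itself before asking the spiller
  // to insert explicit memory accesses.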
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
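
  // Insert restores. A restore is either a reload from the stack slot or,
  // for a rematerializable value, a re-execution of the defining instruction;
  // in both cases folding into the using instruction is attempted first.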
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
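
/// addLiveRangeToEndOfBlock - Create a new value for the given register,
/// defined at startInst, and cover it with a live range extending to the
/// end of startInst's basic block. The value is marked as killed by a PHI
/// in the block's terminator gap.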
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(
    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}