1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden options for help debugging.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
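// When enabled, instructions the target does not mark as trivially
// rematerializable may still be remat'ed, subject to the conservative
// memory- and register-constancy checks in isReMaterializable() below.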
50 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
52 static cl::opt<bool> EnableFastSpilling("fast-spill",
53 cl::init(false), cl::Hidden);
55 STATISTIC(numIntervals, "Number of original intervals");
56 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
70 AU.addPreservedID(PHIEliminationID);
71 AU.addRequiredID(PHIEliminationID);
72 AU.addRequiredID(TwoAddressInstructionPassID);
73 MachineFunctionPass::getAnalysisUsage(AU);
76 void LiveIntervals::releaseMemory() {
77 // Free the live intervals themselves.
78 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
79 E = r2iMap_.end(); I != E; ++I)
87 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
88 VNInfoAllocator.Reset();
89 while (!ClonedMIs.empty()) {
90 MachineInstr *MI = ClonedMIs.back();
92 mf_->DeleteMachineInstr(MI);
96 void LiveIntervals::computeNumbering() {
97 Index2MiMap OldI2MI = i2miMap_;
98 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
107 // Number MachineInstrs and MachineBasicBlocks.
108 // Initialize MBB indexes to a sentinel.
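// Each instruction is assigned InstrSlots::NUM consecutive index values so
// that its LOAD, USE, DEF and STORE slots can be distinguished; extra empty
// slots are also inserted at the start of each block and after every
// instruction, leaving room for new instructions to receive indices later.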
109 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
111 unsigned MIIndex = 0;
112 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
114 unsigned StartIdx = MIIndex;
116 // Insert an empty slot at the beginning of each block.
117 MIIndex += InstrSlots::NUM;
118 i2miMap_.push_back(0);
120 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
122 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
123 assert(inserted && "multiple MachineInstr -> index mappings");
124 i2miMap_.push_back(I);
125 MIIndex += InstrSlots::NUM;
128 // Insert an empty slot after every instruction.
129 MIIndex += InstrSlots::NUM;
130 i2miMap_.push_back(0);
133 // Set the MBB2IdxMap entry for this MBB.
134 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
135 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
137 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
139 if (!OldI2MI.empty())
140 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
141 for (LiveInterval::iterator LI = OI->second->begin(),
142 LE = OI->second->end(); LI != LE; ++LI) {
144 // Remap the start index of the live range to the corresponding new
145 // number, or our best guess at what it _should_ correspond to if the
146 // original instruction has been erased. This is either the following
147 // instruction or its predecessor.
148 unsigned index = LI->start / InstrSlots::NUM;
149 unsigned offset = LI->start % InstrSlots::NUM;
150 if (offset == InstrSlots::LOAD) {
151 std::vector<IdxMBBPair>::const_iterator I =
152 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
153 // Take the pair containing the index
154 std::vector<IdxMBBPair>::const_iterator J =
155 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
157 LI->start = getMBBStartIdx(J->second);
159 LI->start = mi2iMap_[OldI2MI[index]] + offset;
162 // Remap the ending index in the same way that we remapped the start,
163 // except for the final step where we always map to the immediately
164 // following instruction.
165 index = (LI->end - 1) / InstrSlots::NUM;
166 offset = LI->end % InstrSlots::NUM;
167 if (offset == InstrSlots::LOAD) {
168 // VReg dies at end of block.
169 std::vector<IdxMBBPair>::const_iterator I =
170 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
173 LI->end = getMBBEndIdx(I->second) + 1;
175 unsigned idx = index;
176 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
178 if (index != OldI2MI.size())
179 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
181 LI->end = InstrSlots::NUM * i2miMap_.size();
185 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
186 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
189 // Remap the VNInfo def index, which works the same as the
190 // start indices above. VN's with special sentinel defs
191 // don't need to be remapped.
192 if (vni->def != ~0U && vni->def != ~1U) {
193 unsigned index = vni->def / InstrSlots::NUM;
194 unsigned offset = vni->def % InstrSlots::NUM;
195 if (offset == InstrSlots::LOAD) {
196 std::vector<IdxMBBPair>::const_iterator I =
197 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
198 // Take the pair containing the index
199 std::vector<IdxMBBPair>::const_iterator J =
200 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
202 vni->def = getMBBStartIdx(J->second);
204 vni->def = mi2iMap_[OldI2MI[index]] + offset;
208 // Remap the VNInfo kill indices, which works the same as
209 // the end indices above.
210 for (size_t i = 0; i < vni->kills.size(); ++i) {
211 // PHI kills don't need to be remapped.
212 if (!vni->kills[i]) continue;
214 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
215 unsigned offset = vni->kills[i] % InstrSlots::NUM;
216 if (offset == InstrSlots::STORE) {
217 std::vector<IdxMBBPair>::const_iterator I =
218 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
221 vni->kills[i] = getMBBEndIdx(I->second);
223 unsigned idx = index;
224 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
226 if (index != OldI2MI.size())
227 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
228 (idx == index ? offset : 0);
230 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
237 /// runOnMachineFunction - Compute live intervals for the whole function.
239 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
241 mri_ = &mf_->getRegInfo();
242 tm_ = &fn.getTarget();
243 tri_ = tm_->getRegisterInfo();
244 tii_ = tm_->getInstrInfo();
245 aa_ = &getAnalysis<AliasAnalysis>();
246 lv_ = &getAnalysis<LiveVariables>();
247 allocatableRegs_ = tri_->getAllocatableSet(fn);
252 numIntervals += getNumIntervals();
254 DOUT << "********** INTERVALS **********\n";
255 for (iterator I = begin(), E = end(); I != E; ++I) {
256 I->second->print(DOUT, tri_);
260 numIntervalsAfter += getNumIntervals();
265 /// print - Implement the dump method.
266 void LiveIntervals::print(std::ostream &O, const Module* ) const {
267 O << "********** INTERVALS **********\n";
268 for (const_iterator I = begin(), E = end(); I != E; ++I) {
269 I->second->print(O, tri_);
273 O << "********** MACHINEINSTRS **********\n";
274 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
275 mbbi != mbbe; ++mbbi) {
276 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
277 for (MachineBasicBlock::iterator mii = mbbi->begin(),
278 mie = mbbi->end(); mii != mie; ++mii) {
279 O << getInstructionIndex(mii) << '\t' << *mii;
284 /// conflictsWithPhysRegDef - Returns true if the specified register
285 /// is defined at some point during the specified interval.
286 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
287 VirtRegMap &vrm, unsigned reg) {
288 for (LiveInterval::Ranges::const_iterator
289 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
290 for (unsigned index = getBaseIndex(I->start),
291 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
292 index += InstrSlots::NUM) {
293 // skip deleted instructions
294 while (index != end && !getInstructionFromIndex(index))
295 index += InstrSlots::NUM;
296 if (index == end) break;
298 MachineInstr *MI = getInstructionFromIndex(index);
299 unsigned SrcReg, DstReg;
300 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
301 if (SrcReg == li.reg || DstReg == li.reg)
303 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
304 MachineOperand& mop = MI->getOperand(i);
305 if (!mop.isRegister())
307 unsigned PhysReg = mop.getReg();
308 if (PhysReg == 0 || PhysReg == li.reg)
310 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
311 if (!vrm.hasPhys(PhysReg))
313 PhysReg = vrm.getPhys(PhysReg);
315 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
324 void LiveIntervals::printRegName(unsigned reg) const {
325 if (TargetRegisterInfo::isPhysicalRegister(reg))
326 cerr << tri_->getName(reg);
328 cerr << "%reg" << reg;
331 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
332 MachineBasicBlock::iterator mi,
333 unsigned MIIdx, MachineOperand& MO,
335 LiveInterval &interval) {
336 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
337 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
339 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
340 DOUT << "is a implicit_def\n";
344 // Virtual registers may be defined multiple times (due to phi
345 // elimination and 2-addr elimination). Much of what we do only has to be
346 // done once for the vreg. We use an empty interval to detect the first
347 // time we see a vreg.
348 if (interval.empty()) {
349 // Get the Idx of the defining instruction.
350 unsigned defIndex = getDefIndex(MIIdx);
351 // Earlyclobbers move back one.
352 if (MO.isEarlyClobber())
353 defIndex = getUseIndex(MIIdx);
355 MachineInstr *CopyMI = NULL;
356 unsigned SrcReg, DstReg;
357 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
358 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
359 tii_->isMoveInstr(*mi, SrcReg, DstReg))
361 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
363 assert(ValNo->id == 0 && "First value in interval is not 0?");
365 // Loop over all of the blocks that the vreg is defined in. There are
366 // two cases we have to handle here. The most common case is a vreg
367 // whose lifetime is contained within a basic block. In this case there
368 // will be a single kill, in MBB, which comes after the definition.
369 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
370 // FIXME: what about dead vars?
372 if (vi.Kills[0] != mi)
373 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
375 killIdx = defIndex+1;
377 // If the kill happens after the definition, we have an intra-block live range.
379 if (killIdx > defIndex) {
380 assert(vi.AliveBlocks.none() &&
381 "Shouldn't be alive across any blocks!");
382 LiveRange LR(defIndex, killIdx, ValNo);
383 interval.addRange(LR);
384 DOUT << " +" << LR << "\n";
385 interval.addKill(ValNo, killIdx);
390 // The other case we handle is when a virtual register lives to the end
391 // of the defining block, potentially live across some blocks, then is
392 // live into some number of blocks, but gets killed. Start by adding a
393 // range that goes from this definition to the end of the defining block.
394 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
395 DOUT << " +" << NewLR;
396 interval.addRange(NewLR);
398 // Iterate over all of the blocks that the variable is completely
399 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
401 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
402 if (vi.AliveBlocks[i]) {
403 LiveRange LR(getMBBStartIdx(i),
404 getMBBEndIdx(i)+1, // MBB ends at -1.
406 interval.addRange(LR);
411 // Finally, this virtual register is live from the start of any killing
412 // block to the 'use' slot of the killing instruction.
413 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
414 MachineInstr *Kill = vi.Kills[i];
415 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
416 LiveRange LR(getMBBStartIdx(Kill->getParent()),
418 interval.addRange(LR);
419 interval.addKill(ValNo, killIdx);
424 // If this is the second time we see a virtual register definition, it
425 // must be due to phi elimination or two addr elimination. If this is
426 // the result of two address elimination, then the vreg is one of the
427 // def-and-use register operands.
428 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
429 // If this is a two-address definition, then we have already processed
430 // the live range. The only problem is that we didn't realize there
431 // are actually two values in the live interval. Because of this we
432 // need to take the live range that defines this register and split it into two values.
434 assert(interval.containsOneValue());
435 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
436 unsigned RedefIndex = getDefIndex(MIIdx);
437 // Earlyclobbers move back one.
438 if (MO.isEarlyClobber())
439 RedefIndex = getUseIndex(MIIdx);
441 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
442 VNInfo *OldValNo = OldLR->valno;
444 // Delete the initial value, which should be short and continuous,
445 // because the 2-addr copy must be in the same MBB as the redef.
446 interval.removeRange(DefIndex, RedefIndex);
448 // Two-address vregs should always only be redefined once. This means
449 // that at this point, there should be exactly one value number in it.
450 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
452 // The new value number (#1) is defined by the instruction we claimed defined value #0.
454 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
457 // Value#0 is now defined by the 2-addr instruction.
458 OldValNo->def = RedefIndex;
461 // Add the new live interval which replaces the range for the input copy.
462 LiveRange LR(DefIndex, RedefIndex, ValNo);
463 DOUT << " replace range with " << LR;
464 interval.addRange(LR);
465 interval.addKill(ValNo, RedefIndex);
467 // If this redefinition is dead, we need to add a dummy unit live
468 // range covering the def slot.
470 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
473 interval.print(DOUT, tri_);
476 // Otherwise, this must be because of phi elimination. If this is the
477 // first redefinition of the vreg that we have seen, go back and change
478 // the live range in the PHI block to be a different value number.
479 if (interval.containsOneValue()) {
480 assert(vi.Kills.size() == 1 &&
481 "PHI elimination vreg should have one kill, the PHI itself!");
483 // Remove the old range that we now know has an incorrect number.
484 VNInfo *VNI = interval.getValNumInfo(0);
485 MachineInstr *Killer = vi.Kills[0];
486 unsigned Start = getMBBStartIdx(Killer->getParent());
487 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
488 DOUT << " Removing [" << Start << "," << End << "] from: ";
489 interval.print(DOUT, tri_); DOUT << "\n";
490 interval.removeRange(Start, End);
491 VNI->hasPHIKill = true;
492 DOUT << " RESULT: "; interval.print(DOUT, tri_);
494 // Replace the interval with one of a NEW value number. Note that this
495 // value number isn't actually defined by an instruction, weird huh? :)
496 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
497 DOUT << " replace range with " << LR;
498 interval.addRange(LR);
499 interval.addKill(LR.valno, End);
500 DOUT << " RESULT: "; interval.print(DOUT, tri_);
503 // In the case of PHI elimination, each variable definition is only
504 // live until the end of the block. We've already taken care of the
505 // rest of the live range.
506 unsigned defIndex = getDefIndex(MIIdx);
507 // Earlyclobbers move back one.
508 if (MO.isEarlyClobber())
509 defIndex = getUseIndex(MIIdx);
512 MachineInstr *CopyMI = NULL;
513 unsigned SrcReg, DstReg;
514 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
515 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
516 tii_->isMoveInstr(*mi, SrcReg, DstReg))
518 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
520 unsigned killIndex = getMBBEndIdx(mbb) + 1;
521 LiveRange LR(defIndex, killIndex, ValNo);
522 interval.addRange(LR);
523 interval.addKill(ValNo, killIndex);
524 ValNo->hasPHIKill = true;
532 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
533 MachineBasicBlock::iterator mi,
536 LiveInterval &interval,
537 MachineInstr *CopyMI) {
538 // A physical register cannot be live across basic blocks, so its
539 // lifetime must end somewhere in its defining basic block.
540 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
542 unsigned baseIndex = MIIdx;
543 unsigned start = getDefIndex(baseIndex);
544 // Earlyclobbers move back one.
545 if (MO.isEarlyClobber())
546 start = getUseIndex(MIIdx);
547 unsigned end = start;
549 // If it is not used after definition, it is considered dead at
550 // the instruction defining it. Hence its interval is:
551 // [defSlot(def), defSlot(def)+1)
558 // If it is not dead on definition, it must be killed by a
559 // subsequent instruction. Hence its interval is:
560 // [defSlot(def), useSlot(kill)+1)
561 baseIndex += InstrSlots::NUM;
562 while (++mi != MBB->end()) {
563 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
564 getInstructionFromIndex(baseIndex) == 0)
565 baseIndex += InstrSlots::NUM;
566 if (mi->killsRegister(interval.reg, tri_)) {
568 end = getUseIndex(baseIndex) + 1;
570 } else if (mi->modifiesRegister(interval.reg, tri_)) {
571 // Another instruction redefines the register before it is ever read.
572 // Then the register is essentially dead at the instruction that defines
573 // it. Hence its interval is:
574 // [defSlot(def), defSlot(def)+1)
580 baseIndex += InstrSlots::NUM;
583 // The only case we should have a dead physreg here without a killing
584 // instruction, or an instruction where we know it's dead, is if it is live-in to the function and never used.
586 assert(!CopyMI && "physreg was not killed in defining block!");
590 assert(start < end && "did not find end of interval?");
592 // Already exists? Extend old live interval.
593 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
594 VNInfo *ValNo = (OldLR != interval.end())
595 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
596 LiveRange LR(start, end, ValNo);
597 interval.addRange(LR);
598 interval.addKill(LR.valno, end);
599 DOUT << " +" << LR << '\n';
602 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
603 MachineBasicBlock::iterator MI,
607 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
608 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
609 getOrCreateInterval(MO.getReg()));
610 else if (allocatableRegs_[MO.getReg()]) {
611 MachineInstr *CopyMI = NULL;
612 unsigned SrcReg, DstReg;
613 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
614 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
615 tii_->isMoveInstr(*MI, SrcReg, DstReg))
617 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
618 getOrCreateInterval(MO.getReg()), CopyMI);
619 // Def of a register also defines its sub-registers.
620 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
621 // If MI also modifies the sub-register explicitly, avoid processing it
622 // more than once. Do not pass in TRI here so it checks for exact match.
623 if (!MI->modifiesRegister(*AS))
624 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
625 getOrCreateInterval(*AS), 0);
629 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
631 LiveInterval &interval, bool isAlias) {
632 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
634 // Look for kills; if it reaches a def before it's killed, then it shouldn't
635 // be considered a live-in.
636 MachineBasicBlock::iterator mi = MBB->begin();
637 unsigned baseIndex = MIIdx;
638 unsigned start = baseIndex;
639 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
640 getInstructionFromIndex(baseIndex) == 0)
641 baseIndex += InstrSlots::NUM;
642 unsigned end = baseIndex;
644 while (mi != MBB->end()) {
645 if (mi->killsRegister(interval.reg, tri_)) {
647 end = getUseIndex(baseIndex) + 1;
649 } else if (mi->modifiesRegister(interval.reg, tri_)) {
650 // Another instruction redefines the register before it is ever read.
651 // Then the register is essentially dead at the instruction that defines
652 // it. Hence its interval is:
653 // [defSlot(def), defSlot(def)+1)
655 end = getDefIndex(start) + 1;
659 baseIndex += InstrSlots::NUM;
660 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
661 getInstructionFromIndex(baseIndex) == 0)
662 baseIndex += InstrSlots::NUM;
667 // Live-in register might not be used at all.
671 end = getDefIndex(MIIdx) + 1;
673 DOUT << " live through";
678 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
679 interval.addRange(LR);
680 interval.addKill(LR.valno, end);
681 DOUT << " +" << LR << '\n';
684 /// computeIntervals - computes the live intervals for virtual
685 /// registers. For some ordering of the machine instructions [1,N], a
686 /// live interval is an interval [i, j), where 1 <= i <= j < N, for
687 /// which a variable is live.
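/// (For a value defined by instruction d and killed by instruction k this is
/// typically the half-open range [defSlot(d), useSlot(k)+1), matching the
/// ranges built by handleVirtualRegisterDef below.)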
688 void LiveIntervals::computeIntervals() {
690 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
691 << "********** Function: "
692 << ((Value*)mf_->getFunction())->getName() << '\n';
694 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
696 MachineBasicBlock *MBB = MBBI;
697 // Track the index of the current machine instr.
698 unsigned MIIndex = getMBBStartIdx(MBB);
699 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
701 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
703 // Create intervals for live-ins to this BB first.
704 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
705 LE = MBB->livein_end(); LI != LE; ++LI) {
706 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
707 // Multiple live-ins can alias the same register.
708 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
709 if (!hasInterval(*AS))
710 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
714 // Skip over empty initial indices.
715 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
716 getInstructionFromIndex(MIIndex) == 0)
717 MIIndex += InstrSlots::NUM;
719 for (; MI != miEnd; ++MI) {
720 DOUT << MIIndex << "\t" << *MI;
723 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
724 MachineOperand &MO = MI->getOperand(i);
725 // handle register defs - build intervals
726 if (MO.isRegister() && MO.getReg() && MO.isDef()) {
727 handleRegisterDef(MBB, MI, MIIndex, MO, i);
731 MIIndex += InstrSlots::NUM;
733 // Skip over empty indices.
734 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
735 getInstructionFromIndex(MIIndex) == 0)
736 MIIndex += InstrSlots::NUM;
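/// findLiveInMBBs - Collect the basic blocks whose start index falls within
/// the given live range, i.e. the blocks the range is live into. Returns true
/// if at least one such block was found.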
741 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
742 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
743 std::vector<IdxMBBPair>::const_iterator I =
744 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
747 while (I != Idx2MBBMap.end()) {
748 if (LR.end <= I->first)
750 MBBs.push_back(I->second);
757 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
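// Physical register intervals get an effectively infinite spill weight so the
// spiller never tries to spill them; virtual register intervals start with a
// zero weight that is recomputed as their defs and uses are processed.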
758 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
760 return new LiveInterval(reg, Weight);
763 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
764 /// copy field and returns the source register that defines it.
765 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
769 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
770 return VNI->copy->getOperand(1).getReg();
771 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
772 return VNI->copy->getOperand(2).getReg();
773 unsigned SrcReg, DstReg;
774 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
776 assert(0 && "Unrecognized copy instruction!");
780 //===----------------------------------------------------------------------===//
781 // Register allocator hooks.
784 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
785 /// allow one) virtual register operand, then its uses are implicitly using
786 /// the register. Returns the virtual register.
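/// For example, for a remat candidate such as the (hypothetical)
///   %reg1025<def> = ADDri %reg1024, 4
/// the source %reg1024 must be available wherever the def of %reg1025 is
/// rematerialized.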
787 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
788 MachineInstr *MI) const {
790 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
791 MachineOperand &MO = MI->getOperand(i);
792 if (!MO.isRegister() || !MO.isUse())
794 unsigned Reg = MO.getReg();
795 if (Reg == 0 || Reg == li.reg)
797 // FIXME: For now, only remat MI with at most one register operand.
799 "Can't rematerialize instruction with multiple register operand!");
808 /// isValNoAvailableAt - Return true if the val# of the specified interval
809 /// which reaches the given instruction also reaches the specified use index.
810 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
811 unsigned UseIdx) const {
812 unsigned Index = getInstructionIndex(MI);
813 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
814 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
815 return UI != li.end() && UI->valno == ValNo;
818 /// isReMaterializable - Returns true if the definition MI of the specified
819 /// val# of the specified interval is re-materializable.
820 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
821 const VNInfo *ValNo, MachineInstr *MI,
826 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
830 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
831 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
832 // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
833 // this, but remember this is not safe to fold into a two-address instruction.
835 // This is a load from a fixed stack slot. It can be rematerialized.
838 // If the target-specific rules don't identify an instruction as
839 // being trivially rematerializable, use some target-independent rules.
841 if (!MI->getDesc().isRematerializable() ||
842 !tii_->isTriviallyReMaterializable(MI)) {
843 if (!EnableAggressiveRemat)
846 // If the instruction accesses memory but the memoperands have been lost,
847 // we can't analyze it.
848 const TargetInstrDesc &TID = MI->getDesc();
849 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
852 // Avoid instructions obviously unsafe for remat.
853 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
856 // If the instruction accesses memory and the memory could be non-constant,
857 // assume the instruction is not rematerializable.
858 for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
859 E = MI->memoperands_end(); I != E; ++I) {
860 const MachineMemOperand &MMO = *I;
861 if (MMO.isVolatile() || MMO.isStore())
863 const Value *V = MMO.getValue();
866 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
867 if (!PSV->isConstant(mf_->getFrameInfo()))
869 } else if (!aa_->pointsToConstantMemory(V))
873 // If any of the registers accessed are non-constant, conservatively assume
874 // the instruction is not rematerializable.
876 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
877 const MachineOperand &MO = MI->getOperand(i);
878 if (MO.isRegister()) {
879 unsigned Reg = MO.getReg();
882 if (TargetRegisterInfo::isPhysicalRegister(Reg))
885 // Only allow one def, and that in the first operand.
886 if (MO.isDef() != (i == 0))
889 // Only allow constant-valued registers.
890 bool IsLiveIn = mri_->isLiveIn(Reg);
891 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
894 // For the def, it should be the only def.
895 if (MO.isDef() && (next(I) != E || IsLiveIn))
899 // Only allow one other register use, as that's all the
900 // remat mechanisms support currently.
904 else if (Reg != ImpUse)
907 // For uses, there should be only one associated def.
908 if (I != E && (next(I) != E || IsLiveIn))
915 unsigned ImpUse = getReMatImplicitUse(li, MI);
917 const LiveInterval &ImpLi = getInterval(ImpUse);
918 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
919 re = mri_->use_end(); ri != re; ++ri) {
920 MachineInstr *UseMI = &*ri;
921 unsigned UseIdx = getInstructionIndex(UseMI);
922 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
924 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
931 /// isReMaterializable - Returns true if the definition MI of every
932 /// val# of the specified interval is re-materializable.
933 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
935 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
937 const VNInfo *VNI = *i;
938 unsigned DefIdx = VNI->def;
940 continue; // Dead val#.
941 // Is the def for the val# rematerializable?
944 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
945 bool DefIsLoad = false;
947 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
954 /// FilterFoldedOps - Filter out two-address use operands. Return
955 /// true if it finds any issue with the operands that ought to prevent folding.
957 static bool FilterFoldedOps(MachineInstr *MI,
958 SmallVector<unsigned, 2> &Ops,
960 SmallVector<unsigned, 2> &FoldOps) {
961 const TargetInstrDesc &TID = MI->getDesc();
964 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
965 unsigned OpIdx = Ops[i];
966 MachineOperand &MO = MI->getOperand(OpIdx);
967 // FIXME: fold subreg use.
971 MRInfo |= (unsigned)VirtRegMap::isMod;
973 // Filter out two-address use operand(s).
974 if (!MO.isImplicit() &&
975 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
976 MRInfo = VirtRegMap::isModRef;
979 MRInfo |= (unsigned)VirtRegMap::isRef;
981 FoldOps.push_back(OpIdx);
987 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
988 /// slot / to reg or any rematerialized load into the ith operand of the specified
989 /// MI. If it is successful, MI is updated with the newly created MI and returns true.
991 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
992 VirtRegMap &vrm, MachineInstr *DefMI,
994 SmallVector<unsigned, 2> &Ops,
995 bool isSS, int Slot, unsigned Reg) {
996 // If it is an implicit def instruction, just delete it.
997 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
998 RemoveMachineInstrFromMaps(MI);
999 vrm.RemoveMachineInstrFromMaps(MI);
1000 MI->eraseFromParent();
1005 // Filter the list of operand indexes that are to be folded. Abort if
1006 // any operand will prevent folding.
1007 unsigned MRInfo = 0;
1008 SmallVector<unsigned, 2> FoldOps;
1009 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1012 // The only time it's safe to fold into a two-address instruction is when
1013 // it's folding a reload or spill from / into a spill stack slot.
1014 if (DefMI && (MRInfo & VirtRegMap::isMod))
1017 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1018 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1020 // Remember this instruction uses the spill slot.
1021 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1023 // Attempt to fold the memory reference into the instruction. If
1024 // we can do this, we don't need to insert spill code.
1025 MachineBasicBlock &MBB = *MI->getParent();
1026 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1027 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1028 vrm.transferSpillPts(MI, fmi);
1029 vrm.transferRestorePts(MI, fmi);
1030 vrm.transferEmergencySpills(MI, fmi);
1032 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1033 mi2iMap_[fmi] = InstrIdx;
1034 MI = MBB.insert(MBB.erase(MI), fmi);
1041 /// canFoldMemoryOperand - Returns true if the specified load / store
1042 /// folding is possible.
1043 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1044 SmallVector<unsigned, 2> &Ops,
1046 // Filter the list of operand indexes that are to be folded. Abort if
1047 // any operand will prevent folding.
1048 unsigned MRInfo = 0;
1049 SmallVector<unsigned, 2> FoldOps;
1050 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1053 // It's only legal to remat for a use, not a def.
1054 if (ReMat && (MRInfo & VirtRegMap::isMod))
1057 return tii_->canFoldMemoryOperand(MI, FoldOps);
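/// intervalIsInOneMBB - Return true if all ranges of the given interval fall
/// within a single basic block.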
1060 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1061 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1062 for (LiveInterval::Ranges::const_iterator
1063 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1064 std::vector<IdxMBBPair>::const_iterator II =
1065 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1066 if (II == Idx2MBBMap.end())
1068 if (I->end > II->first) // crossing a MBB.
1070 MBBs.insert(II->second);
1071 if (MBBs.size() > 1)
1077 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
1078 /// interval on to-be re-materialized operands of MI) with the new register.
1079 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1080 MachineInstr *MI, unsigned NewVReg,
1082 // There is an implicit use. That means one of the other operands is
1083 // being remat'ed and the remat'ed instruction has li.reg as a
1084 // use operand. Make sure we rewrite that as well.
1085 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1086 MachineOperand &MO = MI->getOperand(i);
1087 if (!MO.isRegister())
1089 unsigned Reg = MO.getReg();
1090 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1092 if (!vrm.isReMaterialized(Reg))
1094 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1095 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1097 UseMO->setReg(NewVReg);
1101 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1102 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1103 bool LiveIntervals::
1104 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1105 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1106 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1107 unsigned Slot, int LdSlot,
1108 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1110 const TargetRegisterClass* rc,
1111 SmallVector<int, 4> &ReMatIds,
1112 const MachineLoopInfo *loopInfo,
1113 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1114 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1115 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1116 MachineBasicBlock *MBB = MI->getParent();
1117 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1118 bool CanFold = false;
1120 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1121 MachineOperand& mop = MI->getOperand(i);
1122 if (!mop.isRegister())
1124 unsigned Reg = mop.getReg();
1125 unsigned RegI = Reg;
1126 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1131 bool TryFold = !DefIsReMat;
1132 bool FoldSS = true; // Default behavior unless it's a remat.
1133 int FoldSlot = Slot;
1135 // If this is the rematerializable definition MI itself and
1136 // all of its uses are rematerialized, simply delete it.
1137 if (MI == ReMatOrigDefMI && CanDelete) {
1138 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1140 RemoveMachineInstrFromMaps(MI);
1141 vrm.RemoveMachineInstrFromMaps(MI);
1142 MI->eraseFromParent();
1146 // If def for this use can't be rematerialized, then try folding.
1147 // If def is rematerializable and it's a load, also try folding.
1148 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1150 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1156 // Scan all of the operands of this instruction rewriting operands
1157 // to use NewVReg instead of li.reg as appropriate. We do this for
1160 // 1. If the instr reads the same spilled vreg multiple times, we
1161 // want to reuse the NewVReg.
1162 // 2. If the instr is a two-addr instruction, we are required to
1163 // keep the src/dst regs pinned.
1165 // Keep track of whether we replace a use and/or def so that we can
1166 // create the spill interval with the appropriate range.
1168 HasUse = mop.isUse();
1169 HasDef = mop.isDef();
1170 SmallVector<unsigned, 2> Ops;
1172 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1173 const MachineOperand &MOj = MI->getOperand(j);
1174 if (!MOj.isRegister())
1176 unsigned RegJ = MOj.getReg();
1177 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1181 HasUse |= MOj.isUse();
1182 HasDef |= MOj.isDef();
1186 if (HasUse && !li.liveAt(getUseIndex(index)))
1187 // Must be defined by an implicit def. It should not be spilled. Note,
1188 // this is for correctness reasons. e.g.
1189 // 8 %reg1024<def> = IMPLICIT_DEF
1190 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1191 // The live range [12, 14) is not part of the r1024 live interval since
1192 // it's defined by an implicit def. It will not conflict with the live
1193 // interval of r1025. Now suppose both registers are spilled; you can
1194 // easily see a situation where both registers are reloaded before
1195 // the INSERT_SUBREG and both target registers would overlap.
1198 // Update stack slot spill weight if we are splitting.
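// (getSpillWeight roughly scales the def/use count by the loop depth of the
// block, so accesses inside deeper loops are weighted more heavily.)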
1199 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1206 // Do not fold load / store here if we are splitting. We'll find an
1207 // optimal point to insert a load / store later.
1209 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1210 Ops, FoldSS, FoldSlot, Reg)) {
1211 // Folding the load/store can completely change the instruction in
1212 // unpredictable ways, rescan it from the beginning.
1216 if (isRemoved(MI)) {
1220 goto RestartInstruction;
1223 // We'll try to fold it later if it's profitable.
1224 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1228 // Create a new virtual register for the spill interval.
1229 bool CreatedNewVReg = false;
1231 NewVReg = mri_->createVirtualRegister(rc);
1233 CreatedNewVReg = true;
1235 mop.setReg(NewVReg);
1236 if (mop.isImplicit())
1237 rewriteImplicitOps(li, MI, NewVReg, vrm);
1239 // Reuse NewVReg for other reads.
1240 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1241 MachineOperand &mopj = MI->getOperand(Ops[j]);
1242 mopj.setReg(NewVReg);
1243 if (mopj.isImplicit())
1244 rewriteImplicitOps(li, MI, NewVReg, vrm);
1247 if (CreatedNewVReg) {
1249 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1250 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1251 // Each valnum may have its own remat id.
1252 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1254 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1256 if (!CanDelete || (HasUse && HasDef)) {
1257 // If this is a two-addr instruction then its use operands are
1258 // rematerializable but its def is not. It should be assigned a
1260 vrm.assignVirt2StackSlot(NewVReg, Slot);
1263 vrm.assignVirt2StackSlot(NewVReg, Slot);
1265 } else if (HasUse && HasDef &&
1266 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1267 // If this interval hasn't been assigned a stack slot (because earlier
1268 // def is a deleted remat def), do it now.
1269 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1270 vrm.assignVirt2StackSlot(NewVReg, Slot);
1273 // Re-matting an instruction with virtual register use. Add the
1274 // register as an implicit use on the use MI.
1275 if (DefIsReMat && ImpUse)
1276 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1278 // create a new register interval for this spill / remat.
1279 LiveInterval &nI = getOrCreateInterval(NewVReg);
1280 if (CreatedNewVReg) {
1281 NewLIs.push_back(&nI);
1282 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1284 vrm.setIsSplitFromReg(NewVReg, li.reg);
1288 if (CreatedNewVReg) {
1289 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1290 nI.getNextValue(~0U, 0, VNInfoAllocator));
1294 // Extend the split live interval to this def / use.
1295 unsigned End = getUseIndex(index)+1;
1296 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1297 nI.getValNumInfo(nI.getNumValNums()-1));
1303 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1304 nI.getNextValue(~0U, 0, VNInfoAllocator));
1309 DOUT << "\t\t\t\tAdded new interval: ";
1310 nI.print(DOUT, tri_);
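/// anyKillInMBBAfterIdx - Return true if the given val# has a kill in MBB
/// that comes after the specified index.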
1315 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1317 MachineBasicBlock *MBB, unsigned Idx) const {
1318 unsigned End = getMBBEndIdx(MBB);
1319 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1320 unsigned KillIdx = VNI->kills[j];
1321 if (KillIdx > Idx && KillIdx < End)
1327 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1328 /// during spilling.
1330 struct RewriteInfo {
1335 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1336 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1339 struct RewriteInfoCompare {
1340 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1341 return LHS.Index < RHS.Index;
1346 void LiveIntervals::
1347 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1348 LiveInterval::Ranges::const_iterator &I,
1349 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1350 unsigned Slot, int LdSlot,
1351 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1353 const TargetRegisterClass* rc,
1354 SmallVector<int, 4> &ReMatIds,
1355 const MachineLoopInfo *loopInfo,
1356 BitVector &SpillMBBs,
1357 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1358 BitVector &RestoreMBBs,
1359 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1360 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1361 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1362 bool AllCanFold = true;
1363 unsigned NewVReg = 0;
1364 unsigned start = getBaseIndex(I->start);
1365 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1367 // First collect all the def / use in this live range that will be rewritten.
1368 // Make sure they are sorted according to instruction index.
1369 std::vector<RewriteInfo> RewriteMIs;
1370 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1371 re = mri_->reg_end(); ri != re; ) {
1372 MachineInstr *MI = &*ri;
1373 MachineOperand &O = ri.getOperand();
1375 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1376 unsigned index = getInstructionIndex(MI);
1377 if (index < start || index >= end)
1379 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1380 // Must be defined by an implicit def. It should not be spilled. Note,
1381 // this is for correctness reasons. e.g.
1382 // 8 %reg1024<def> = IMPLICIT_DEF
1383 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1384 // The live range [12, 14) is not part of the r1024 live interval since
1385 // it's defined by an implicit def. It will not conflict with the live
1386 // interval of r1025. Now suppose both registers are spilled; you can
1387 // easily see a situation where both registers are reloaded before
1388 // the INSERT_SUBREG and both target registers would overlap.
1390 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1392 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1394 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1395 // Now rewrite the defs and uses.
1396 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1397 RewriteInfo &rwi = RewriteMIs[i];
1399 unsigned index = rwi.Index;
1400 bool MIHasUse = rwi.HasUse;
1401 bool MIHasDef = rwi.HasDef;
1402 MachineInstr *MI = rwi.MI;
1403 // If MI defs and/or uses the same register multiple times, then there
1404 // are multiple entries.
1405 unsigned NumUses = MIHasUse;
1406 while (i != e && RewriteMIs[i].MI == MI) {
1407 assert(RewriteMIs[i].Index == index);
1408 bool isUse = RewriteMIs[i].HasUse;
1409 if (isUse) ++NumUses;
1411 MIHasDef |= RewriteMIs[i].HasDef;
1414 MachineBasicBlock *MBB = MI->getParent();
1416 if (ImpUse && MI != ReMatDefMI) {
1417 // Re-matting an instruction with virtual register use. Update the
1418 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1420 LiveInterval &ImpLi = getInterval(ImpUse);
1421 ImpLi.weight = HUGE_VALF;
1424 unsigned MBBId = MBB->getNumber();
1425 unsigned ThisVReg = 0;
1427 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1428 if (NVI != MBBVRegsMap.end()) {
1429 ThisVReg = NVI->second;
1436 // It's better to start a new interval to avoid artificially
1437 // extending the new interval.
1438 if (MIHasDef && !MIHasUse) {
1439 MBBVRegsMap.erase(MBB->getNumber());
1445 bool IsNew = ThisVReg == 0;
1447 // This ends the previous live interval. If all of its def / use
1448 // can be folded, give it a low spill weight.
1449 if (NewVReg && TrySplit && AllCanFold) {
1450 LiveInterval &nI = getOrCreateInterval(NewVReg);
1457 bool HasDef = false;
1458 bool HasUse = false;
1459 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1460 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1461 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1462 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1463 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1464 if (!HasDef && !HasUse)
1467 AllCanFold &= CanFold;
1469 // Update weight of spill interval.
1470 LiveInterval &nI = getOrCreateInterval(NewVReg);
1472 // The spill weight is now infinity as it cannot be spilled again.
1473 nI.weight = HUGE_VALF;
1477 // Keep track of the last def and first use in each MBB.
1479 if (MI != ReMatOrigDefMI || !CanDelete) {
1480 bool HasKill = false;
1482 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1484 // If this is two-address code, then this index starts a new VNInfo.
1485 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1487 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1489 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1490 SpillIdxes.find(MBBId);
1492 if (SII == SpillIdxes.end()) {
1493 std::vector<SRInfo> S;
1494 S.push_back(SRInfo(index, NewVReg, true));
1495 SpillIdxes.insert(std::make_pair(MBBId, S));
1496 } else if (SII->second.back().vreg != NewVReg) {
1497 SII->second.push_back(SRInfo(index, NewVReg, true));
1498 } else if ((int)index > SII->second.back().index) {
1499 // If there is an earlier def and this is a two-address
1500 // instruction, then it's not possible to fold the store (which
1501 // would also fold the load).
1502 SRInfo &Info = SII->second.back();
1504 Info.canFold = !HasUse;
1506 SpillMBBs.set(MBBId);
1507 } else if (SII != SpillIdxes.end() &&
1508 SII->second.back().vreg == NewVReg &&
1509 (int)index > SII->second.back().index) {
1510 // There is an earlier def that's not killed (must be two-address).
1511 // The spill is no longer needed.
1512 SII->second.pop_back();
1513 if (SII->second.empty()) {
1514 SpillIdxes.erase(MBBId);
1515 SpillMBBs.reset(MBBId);
1522 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1523 SpillIdxes.find(MBBId);
1524 if (SII != SpillIdxes.end() &&
1525 SII->second.back().vreg == NewVReg &&
1526 (int)index > SII->second.back().index)
1527 // Use(s) follow the last def; it's not safe to fold the spill.
1528 SII->second.back().canFold = false;
1529 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1530 RestoreIdxes.find(MBBId);
1531 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1532 // If we are splitting live intervals, only fold if it's the first
1533 // use and there isn't another use later in the MBB.
1534 RII->second.back().canFold = false;
1536 // Only need a reload if there isn't an earlier def / use.
1537 if (RII == RestoreIdxes.end()) {
1538 std::vector<SRInfo> Infos;
1539 Infos.push_back(SRInfo(index, NewVReg, true));
1540 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1542 RII->second.push_back(SRInfo(index, NewVReg, true));
1544 RestoreMBBs.set(MBBId);
1548 // Update spill weight.
1549 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1550 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1553 if (NewVReg && TrySplit && AllCanFold) {
1554 // If all of its def / use can be folded, give it a low spill weight.
1555 LiveInterval &nI = getOrCreateInterval(NewVReg);
1560 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1561 BitVector &RestoreMBBs,
1562 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1563 if (!RestoreMBBs[Id])
1565 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1566 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1567 if (Restores[i].index == index &&
1568 Restores[i].vreg == vr &&
1569 Restores[i].canFold)
1574 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1575 BitVector &RestoreMBBs,
1576 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1577 if (!RestoreMBBs[Id])
1579 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1580 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1581 if (Restores[i].index == index && Restores[i].vreg)
1582 Restores[i].index = -1;
1585 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1586 /// spilled and create empty intervals for their uses.
1588 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1589 const TargetRegisterClass* rc,
1590 std::vector<LiveInterval*> &NewLIs) {
1591 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1592 re = mri_->reg_end(); ri != re; ) {
1593 MachineOperand &O = ri.getOperand();
1594 MachineInstr *MI = &*ri;
1597 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1598 "Register def was not rewritten?");
1599 RemoveMachineInstrFromMaps(MI);
1600 vrm.RemoveMachineInstrFromMaps(MI);
1601 MI->eraseFromParent();
1603 // This must be a use of an implicit_def so it's not part of the live
1604 // interval. Create a new empty live interval for it.
1605 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1606 unsigned NewVReg = mri_->createVirtualRegister(rc);
1608 vrm.setIsImplicitlyDefined(NewVReg);
1609 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1610 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1611 MachineOperand &MO = MI->getOperand(i);
1612 if (MO.isRegister() && MO.getReg() == li.reg)
1621 bool operator()(LiveInterval* A, LiveInterval* B) {
1622 return A->beginNumber() < B->beginNumber();
1627 std::vector<LiveInterval*> LiveIntervals::
1628 addIntervalsForSpillsFast(const LiveInterval &li,
1629 const MachineLoopInfo *loopInfo,
1630 VirtRegMap &vrm, float& SSWeight) {
1631 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1633 std::vector<LiveInterval*> added;
1635 assert(li.weight != HUGE_VALF &&
1636 "attempt to spill already spilled interval!");
1638 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1642 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1646 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1647 while (RI != mri_->reg_end()) {
1648 MachineInstr* MI = &*RI;
1650 SmallVector<unsigned, 2> Indices;
1651 bool HasUse = false;
1652 bool HasDef = false;
1654 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1655 MachineOperand& mop = MI->getOperand(i);
1656 if (!mop.isRegister() || mop.getReg() != li.reg) continue;
1658 HasUse |= MI->getOperand(i).isUse();
1659 HasDef |= MI->getOperand(i).isDef();
1661 Indices.push_back(i);
1664 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1665 Indices, true, slot, li.reg)) {
1666 unsigned NewVReg = mri_->createVirtualRegister(rc);
1668 vrm.assignVirt2StackSlot(NewVReg, slot);
1670 // create a new register for this spill
1671 LiveInterval &nI = getOrCreateInterval(NewVReg);
1673 // the spill weight is now infinity as it
1674 // cannot be spilled again
1675 nI.weight = HUGE_VALF;
1677 // Rewrite register operands to use the new vreg.
1678 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1679 E = Indices.end(); I != E; ++I) {
1680 MI->getOperand(*I).setReg(NewVReg);
1682 if (MI->getOperand(*I).isUse())
1683 MI->getOperand(*I).setIsKill(true);
1686 // Fill in the new live interval.
1687 unsigned index = getInstructionIndex(MI);
1689 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1690 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1693 vrm.addRestorePoint(NewVReg, MI);
1696 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1697 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1700 vrm.addSpillPoint(NewVReg, true, MI);
1703 added.push_back(&nI);
1705 DOUT << "\t\t\t\tadded new interval: ";
1709 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1712 SSWeight += getSpillWeight(true, true, loopDepth);
1714 SSWeight += getSpillWeight(false, true, loopDepth);
1716 SSWeight += getSpillWeight(true, false, loopDepth);
1720 RI = mri_->reg_begin(li.reg);
1723 // Clients expect the new intervals to be returned in sorted order.
1724 std::sort(added.begin(), added.end(), LISorter());
1729 std::vector<LiveInterval*> LiveIntervals::
1730 addIntervalsForSpills(const LiveInterval &li,
1731 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1734 if (EnableFastSpilling)
1735 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1737 assert(li.weight != HUGE_VALF &&
1738 "attempt to spill already spilled interval!");
1740 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1741 li.print(DOUT, tri_);
1744 // Spill slot weight.
1747 // Each bit specifies whether a spill is required in the corresponding MBB.
1748 BitVector SpillMBBs(mf_->getNumBlockIDs());
1749 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1750 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1751 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1752 DenseMap<unsigned,unsigned> MBBVRegsMap;
1753 std::vector<LiveInterval*> NewLIs;
1754 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1756 unsigned NumValNums = li.getNumValNums();
1757 SmallVector<MachineInstr*, 4> ReMatDefs;
1758 ReMatDefs.resize(NumValNums, NULL);
1759 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1760 ReMatOrigDefs.resize(NumValNums, NULL);
1761 SmallVector<int, 4> ReMatIds;
1762 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1763 BitVector ReMatDelete(NumValNums);
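// No stack slot has been assigned yet; MAX_STACK_SLOT serves as the "none"
// sentinel until we decide below whether the interval really needs one.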
1764 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1766 // Spilling a split live interval. It cannot be split any further. Also,
1767 // it's guaranteed to be a single val# / range interval.
1768 if (vrm.getPreSplitReg(li.reg)) {
1769 vrm.setIsSplitFromReg(li.reg, 0);
1770 // Unset the split kill marker on the last use.
1771 unsigned KillIdx = vrm.getKillPoint(li.reg);
1773 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1774 assert(KillMI && "Last use disappeared?");
1775 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1776 assert(KillOp != -1 && "Last use disappeared?");
1777 KillMI->getOperand(KillOp).setIsKill(false);
1779 vrm.removeKillPoint(li.reg);
1780 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1781 Slot = vrm.getStackSlot(li.reg);
1782 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1783 MachineInstr *ReMatDefMI = DefIsReMat ?
1784 vrm.getReMaterializedMI(li.reg) : NULL;
1786 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1787 bool isLoad = isLoadSS ||
1788 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1789 bool IsFirstRange = true;
1790 for (LiveInterval::Ranges::const_iterator
1791 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1792 // If this is a split live interval with multiple ranges, it means there
1793 // are two-address instructions that re-defined the value. Only the
1794 // first def can be rematerialized!
1796 // Note ReMatOrigDefMI has already been deleted.
1797 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1798 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1799 false, vrm, rc, ReMatIds, loopInfo,
1800 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1801 MBBVRegsMap, NewLIs, SSWeight);
1803 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1804 Slot, 0, false, false, false,
1805 false, vrm, rc, ReMatIds, loopInfo,
1806 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1807 MBBVRegsMap, NewLIs, SSWeight);
1809 IsFirstRange = false;
1812 SSWeight = 0.0f; // Already accounted for when split.
1813 handleSpilledImpDefs(li, vrm, rc, NewLIs);
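
  // Decide whether splitting at basic block boundaries will be attempted,
  // then analyze each value number: if its def can be rematerialized,
  // remember how; a stack slot is only needed for values whose uses cannot
  // all be rematerialized.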
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
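
  // Walk the blocks that were marked as needing spill code. For each
  // recorded spill point, first try to fold the store (and possibly a
  // restore in the same block) directly into the instruction; only fall
  // back to an explicit spill point when folding fails.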
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (MO.isImplicit() ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
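
  // Process the restore points analogously: fold the reload into the using
  // instruction when possible. If the value is rematerializable and its def
  // is itself a load, try folding that load instead; any virtual register
  // the rematerialized def reads becomes an implicit use of the user.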
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register, assert
    // since we can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
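
/// addLiveRangeToEndOfBlock - Create a new value for the given register,
/// defined by startInst, and extend its live interval from that def to the
/// end of startInst's basic block, marking it as killed by a PHI.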
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}