//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
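
// A note on instruction numbering (a sketch, assuming the usual InstrSlots
// layout of LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4): every
// MachineInstr is assigned a base index that is a multiple of
// InstrSlots::NUM, and the four slots model the points where a reload, an
// operand read, an operand write, and a spill may occur around it. For an
// instruction with base index 8:
//
//   getLoadIndex(8)  == 8    // reload slot, before the instruction
//   getUseIndex(8)   == 9    // operands are read
//   getDefIndex(8)   == 10   // operands are written
//   getStoreIndex(8) == 11   // spill slot, after the instruction
//
// so a value defined here and last read by the instruction at base index 16
// gets the half-open live range [10, 17).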
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds, "Number of loads/stores folded into instructions");
STATISTIC(numSplits, "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are destroyed.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
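
  // A sketch of the remapping below, assuming InstrSlots::NUM == 4: for a
  // range boundary sitting on an instruction slot,
  //   index  = LI->start / InstrSlots::NUM;   // which old instruction
  //   offset = LI->start % InstrSlots::NUM;   // which slot of it
  //   LI->start = mi2iMap_[OldI2MI[index]] + offset;
  // i.e. old base 16 + USE(1) == 17 becomes the new base index of that same
  // instruction plus 1. Boundaries sitting on a LOAD slot mark block
  // live-ins and are remapped to the start index of their basic block
  // instead.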
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            ((I != OldI2MBB.end() && I->first > index) ||
             (I == OldI2MBB.end() && OldI2MBB.size() > 0)) ? (I - 1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::USE) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            ((I != OldI2MBB.end() && I->first > index) ||
             (I == OldI2MBB.end() && OldI2MBB.size() > 0)) ? (I - 1) : I;

          LI->end = getMBBEndIdx(J->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }

        // Remap the VNInfo def index, which works the same as the
        // start indices above.
        VNInfo* vni = LI->valno;
        index = vni->def / InstrSlots::NUM;
        offset = vni->def % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            ((I != OldI2MBB.end() && I->first > index) ||
             (I == OldI2MBB.end() && OldI2MBB.size() > 0)) ? (I - 1) : I;

          vni->def = getMBBStartIdx(J->second);
        } else {
          vni->def = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          index = (vni->kills[i] - 1) / InstrSlots::NUM;
          offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::USE) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
                               vni->kills[i]);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              ((I != OldI2MBB.end() && I->first > index) ||
               (I == OldI2MBB.end() && OldI2MBB.size() > 0)) ? (I - 1) : I;

            vni->kills[i] = getMBBEndIdx(J->second) + 1;
          } else {
            unsigned idx = index;
            // Skip deleted instructions, staying within bounds.
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end - 1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0])) + 1;
      else
        killIdx = defIndex + 1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
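
    // A worked example of the intra-block case above (a sketch, assuming
    // InstrSlots::NUM == 4): with the def at base index 8 and the single
    // kill at base index 16, the interval gets the range
    // [getDefIndex(8), getUseIndex(16)+1) == [10, 18), killed at 18.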

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
    }
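
    // For instance (a sketch): a vreg defined at base index 8 in BB0, live
    // through BB1, and killed at base index 40 in BB2 ends up with three
    // ranges: [10, endIdx(BB0)+1), [startIdx(BB1), endIdx(BB1)+1), and
    // [startIdx(BB2), 42), all carrying the same value number.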
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
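
      // A sketch of the result, assuming the input copy defined the vreg at
      // index 10 and the two-address instruction redefines it at index 18:
      //   before: one value,  defined at 10, range [10, ...)
      //   after:  value #1,   defined at 10, range [10, 18)  (input copy)
      //           value #0,   defined at 18, range [18, ...)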
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction is if it is live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
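
  // For example (a sketch): a physical register defined at base index 8 and
  // never read before the block end (or before the next def of the same
  // register) gets the unit range [getDefIndex(8), getDefIndex(8)+1), i.e.
  // [10, 11).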
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == start) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
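
// For instance (a sketch): a register live in to a block starting at index
// MIIdx and first killed at base index 20 gets the range
// [MIIdx, getUseIndex(20)+1) == [MIIdx, 22); if it is neither read nor
// clobbered in the block, it is treated as live through and the range runs
// to the end of the block.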

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle register defs - build intervals.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                   HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;
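
  // For example (a sketch): a constant materialization such as
  //   %reg1024 = MOV32ri 42        (x86)
  // is trivially rematerializable, while a load is normally only accepted
  // when its memory is provably constant, e.g. an immutable stack slot as
  // above or a constant pool entry.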

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // heuristics.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (alist<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isRegister())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For uses, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
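///
/// For example (a sketch, in x86-style pseudo machine code): folding a
/// reload into a use can turn
///   %reg1024 = MOV32rm <fi#2>
///   %reg1025 = ADD32rr %reg1026, %reg1024<kill>
/// into
///   %reg1025 = ADD32rm %reg1026, <fi#2>
/// when the target's foldMemoryOperand supports it.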
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // Crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1093 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1094 MachineOperand& mop = MI->getOperand(i);
1095 if (!mop.isRegister())
1097 unsigned Reg = mop.getReg();
1098 unsigned RegI = Reg;
1099 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1104 bool TryFold = !DefIsReMat;
1105 bool FoldSS = true; // Default behavior unless it's a remat.
1106 int FoldSlot = Slot;
1108 // If this is the rematerializable definition MI itself and
1109 // all of its uses are rematerialized, simply delete it.
1110 if (MI == ReMatOrigDefMI && CanDelete) {
1111 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1113 RemoveMachineInstrFromMaps(MI);
1114 vrm.RemoveMachineInstrFromMaps(MI);
1115 MI->eraseFromParent();
1119 // If def for this use can't be rematerialized, then try folding.
1120 // If def is rematerializable and it's a load, also try folding.
1121 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1123 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1129 // Scan all of the operands of this instruction rewriting operands
1130 // to use NewVReg instead of li.reg as appropriate. We do this for
1133 // 1. If the instr reads the same spilled vreg multiple times, we
1134 // want to reuse the NewVReg.
1135 // 2. If the instr is a two-addr instruction, we are required to
1136 // keep the src/dst regs pinned.
1138 // Keep track of whether we replace a use and/or def so that we can
1139 // create the spill interval with the appropriate range.
1141 HasUse = mop.isUse();
1142 HasDef = mop.isDef();
1143 SmallVector<unsigned, 2> Ops;
1145 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1146 const MachineOperand &MOj = MI->getOperand(j);
1147 if (!MOj.isRegister())
1149 unsigned RegJ = MOj.getReg();
1150 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1154 HasUse |= MOj.isUse();
1155 HasDef |= MOj.isDef();
1159 if (HasUse && !li.liveAt(getUseIndex(index)))
1160 // Must be defined by an implicit def. It should not be spilled. Note,
1161 // this is for correctness reason. e.g.
1162 // 8 %reg1024<def> = IMPLICIT_DEF
1163 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1164 // The live range [12, 14) are not part of the r1024 live interval since
1165 // it's defined by an implicit def. It will not conflicts with live
1166 // interval of r1025. Now suppose both registers are spilled, you can
1167 // easily see a situation where both registers are reloaded before
1168 // the INSERT_SUBREG and both target registers that would overlap.

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
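
    // A sketch of the ranges added here, for an instruction at base index 8:
    // a reload-use above covers [getLoadIndex(8), getUseIndex(8)+1) ==
    // [8, 10); a spilled def below covers [getDefIndex(8), getStoreIndex(8))
    // == [10, 11).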
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      //   8  %reg1024<def> = IMPLICIT_DEF
      //   12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs,
                                SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
      }
      RestoreMBBs.set(MBBId);
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }
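
  // A note on the heuristic below (a sketch, assuming getSpillWeight is
  // roughly (isDef + isUse) * 10^loopDepth): split intervals whose spills
  // and reloads can all be folded are given a tenth of their weight, so
  // the allocator will prefer to respill them over "real" intervals.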

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
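
  // Now handle the restore points. A restore is either folded into its
  // user, recomputed from the rematerialized def, or left to the spiller
  // as an explicit load from the stack slot.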
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to
      // issue a load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO)
              == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
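
// Note: addIntervalsForSpills above is the entry point the linear scan
// register allocator uses when an interval must be spilled; the shorter
// intervals it returns are fed back into the allocation queue.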

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
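
// For example, on 32-bit x86 asking for the representative of AL walks its
// super-registers (AX, EAX) and typically settles on EAX: the widest
// register covering the same storage that has no allocatable super-register
// of its own. (Illustrative only; the answer depends on the target's
// register hierarchy and on which intervals currently exist.)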

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Assert that any allocatable register aliasing PhysReg is either the
    // chosen representative super-register itself or one of its
    // sub-registers; we can't handle other cases yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
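
/// addLiveRangeToEndOfBlock - Extend the live interval of the given
/// register with a new value defined at startInst, covering the range from
/// that def to the end of startInst's basic block.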
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}
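
// Example (hypothetical caller): a pass that inserts a copy defining DstReg
// at the bottom of a block can keep this analysis consistent with:
//
//   MachineInstr *Copy = ...;  // newly inserted instruction defining DstReg
//   LIs->addLiveRangeToEndOfBlock(DstReg, Copy);
//
// Here LIs is assumed to be a LiveIntervals* obtained via getAnalysis.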