1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden options to help with debugging.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
50 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
52 STATISTIC(numIntervals, "Number of original intervals");
53 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
57 char LiveIntervals::ID = 0;
58 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addRequired<AliasAnalysis>();
62 AU.addPreserved<AliasAnalysis>();
63 AU.addPreserved<LiveVariables>();
64 AU.addRequired<LiveVariables>();
65 AU.addPreservedID(MachineLoopInfoID);
66 AU.addPreservedID(MachineDominatorsID);
67 AU.addPreservedID(PHIEliminationID);
68 AU.addRequiredID(PHIEliminationID);
69 AU.addRequiredID(TwoAddressInstructionPassID);
70 MachineFunctionPass::getAnalysisUsage(AU);
73 void LiveIntervals::releaseMemory() {
79 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
80 VNInfoAllocator.Reset();
81 while (!ClonedMIs.empty()) {
82 MachineInstr *MI = ClonedMIs.back();
84 mf_->DeleteMachineInstr(MI);
88 void LiveIntervals::computeNumbering() {
89 Index2MiMap OldI2MI = i2miMap_;
90 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
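// Note on the numbering scheme (per the InstrSlots enum in
// LiveIntervalAnalysis.h): each MachineInstr is allotted InstrSlots::NUM (4)
// consecutive index values, conventionally used as LOAD, USE, DEF and STORE
// slots. The index arithmetic below assumes this layout, roughly:
//   getBaseIndex(i) == i - (i % InstrSlots::NUM)
//   getUseIndex(i)  == getBaseIndex(i) + InstrSlots::USE
//   getDefIndex(i)  == getBaseIndex(i) + InstrSlots::DEF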
99 // Number MachineInstrs and MachineBasicBlocks.
100 // Initialize MBB indexes to a sentinel.
101 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
103 unsigned MIIndex = 0;
104 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
106 unsigned StartIdx = MIIndex;
108 // Insert an empty slot at the beginning of each block.
109 MIIndex += InstrSlots::NUM;
110 i2miMap_.push_back(0);
112 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
114 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
115 assert(inserted && "multiple MachineInstr -> index mappings");
116 i2miMap_.push_back(I);
117 MIIndex += InstrSlots::NUM;
120 // Insert an empty slot after every instruction.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
125 // Set the MBB2IdxMap entry for this MBB.
126 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
127 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
130 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
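// Idx2MBBMap pairs each block's start index with the block itself; keeping it
// sorted by index lets the std::lower_bound searches below (and in
// findLiveInMBBs / intervalIsInOneMBB) locate the block covering an index.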
132 if (!OldI2MI.empty())
133 for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
134 for (LiveInterval::iterator LI = OI->second.begin(),
135 LE = OI->second.end(); LI != LE; ++LI) {
137 // Remap the start index of the live range to the corresponding new
138 // number, or our best guess at what it _should_ correspond to if the
139 // original instruction has been erased. This is either the following
140 // instruction or its predecessor.
141 unsigned index = LI->start / InstrSlots::NUM;
142 unsigned offset = LI->start % InstrSlots::NUM;
143 if (offset == InstrSlots::LOAD || LI->valno->def == ~0U) {
144 std::vector<IdxMBBPair>::const_iterator I =
145 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
146 // Take the pair containing the index
147 std::vector<IdxMBBPair>::const_iterator J =
148 ((I != OldI2MBB.end() && I->first > index) ||
149 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
151 LI->start = getMBBStartIdx(J->second);
153 LI->start = mi2iMap_[OldI2MI[index]] + offset;
156 // Remap the ending index in the same way that we remapped the start,
157 // except for the final step where we always map to the immediately
158 // following instruction.
159 index = (LI->end - 1) / InstrSlots::NUM;
160 offset = LI->end % InstrSlots::NUM;
161 if (LI->valno->hasPHIKill && !OldI2MI[index]) {
162 // Special handling for when this was previously killed by a PHI, but
163 // the PHI has now been removed. We need to trim the live interval
164 // to die at the end of the preceding block.
165 std::vector<IdxMBBPair>::const_iterator I =
166 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
167 // Take the pair containing the index
168 std::vector<IdxMBBPair>::const_iterator J =
169 ((I != OldI2MBB.end() && I->first > index) ||
170 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
172 MachineBasicBlock* StartMBB = J->second;
173 MachineBasicBlock* CurrMBB = J->second;
175 while (CurrMBB == StartMBB) {
176 while (index > 0 && !OldI2MI[index]) --index;
177 CurrMBB = OldI2MI[index]->getParent();
178 if (!StartMBB) StartMBB = CurrMBB;
183 LI->end = getMBBEndIdx(CurrMBB) + 1;
184 } else if (offset == InstrSlots::USE) {
185 std::vector<IdxMBBPair>::const_iterator I =
186 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
187 // Take the pair containing the index
188 std::vector<IdxMBBPair>::const_iterator J =
189 ((I != OldI2MBB.end() && I->first > index) ||
190 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
192 LI->end = getMBBEndIdx(J->second) + 1;
194 unsigned idx = index;
195 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
197 if (index != OldI2MI.size())
198 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
200 LI->end = InstrSlots::NUM * i2miMap_.size();
203 // Remap the VNInfo def index, which works the same as the
204 // start indices above.
205 VNInfo* vni = LI->valno;
206 if (vni->def != ~0U) {
207 index = vni->def / InstrSlots::NUM;
208 offset = vni->def % InstrSlots::NUM;
209 if (offset == InstrSlots::LOAD) {
210 std::vector<IdxMBBPair>::const_iterator I =
211 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
213 // Take the pair containing the index
214 std::vector<IdxMBBPair>::const_iterator J =
215 ((I != OldI2MBB.end() && I->first > index) ||
216 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
218 vni->def = getMBBStartIdx(J->second);
221 vni->def = mi2iMap_[OldI2MI[index]] + offset;
225 // Remap the VNInfo kill indices, which works the same as
226 // the end indices above.
227 for (size_t i = 0; i < vni->kills.size(); ++i) {
228 index = (vni->kills[i]-1) / InstrSlots::NUM;
229 offset = vni->kills[i] % InstrSlots::NUM;
231 if (LI->valno->hasPHIKill && !OldI2MI[index]) {
232 // Special handling for when this was previously killed by a PHI,
233 // but the PHI has now been removed. We need to trim the live
234 // interval to die at the end of the preceding block.
235 std::vector<IdxMBBPair>::const_iterator I =
236 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
237 // Take the pair containing the index
238 std::vector<IdxMBBPair>::const_iterator J =
239 ((I != OldI2MBB.end() && I->first > index) ||
240 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
242 MachineBasicBlock* StartMBB = J->second;
243 MachineBasicBlock* CurrMBB = J->second;
245 while (CurrMBB == StartMBB) {
246 while (index > 0 && !OldI2MI[index]) --index;
247 CurrMBB = OldI2MI[index]->getParent();
248 if (!StartMBB) StartMBB = CurrMBB;
253 vni->kills[i] = getMBBEndIdx(CurrMBB) + 1;
254 } else if (offset == InstrSlots::USE) {
255 std::vector<IdxMBBPair>::const_iterator I =
256 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
257 // Take the pair containing the index
258 std::vector<IdxMBBPair>::const_iterator J =
259 ((I != OldI2MBB.end() && I->first > index) ||
260 (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;
262 vni->kills[i] = getMBBEndIdx(J->second) + 1;
264 unsigned idx = index;
265 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
268 if (index != OldI2MI.size())
269 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
270 (idx == index ? offset : 0);
272 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
278 /// runOnMachineFunction - Compute live intervals for the whole function.
280 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
282 mri_ = &mf_->getRegInfo();
283 tm_ = &fn.getTarget();
284 tri_ = tm_->getRegisterInfo();
285 tii_ = tm_->getInstrInfo();
286 aa_ = &getAnalysis<AliasAnalysis>();
287 lv_ = &getAnalysis<LiveVariables>();
288 allocatableRegs_ = tri_->getAllocatableSet(fn);
293 numIntervals += getNumIntervals();
295 DOUT << "********** INTERVALS **********\n";
296 for (iterator I = begin(), E = end(); I != E; ++I) {
297 I->second.print(DOUT, tri_);
301 numIntervalsAfter += getNumIntervals();
306 /// print - Implement the dump method.
307 void LiveIntervals::print(std::ostream &O, const Module* ) const {
308 O << "********** INTERVALS **********\n";
309 for (const_iterator I = begin(), E = end(); I != E; ++I) {
310 I->second.print(O, tri_);
314 O << "********** MACHINEINSTRS **********\n";
315 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
316 mbbi != mbbe; ++mbbi) {
317 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
318 for (MachineBasicBlock::iterator mii = mbbi->begin(),
319 mie = mbbi->end(); mii != mie; ++mii) {
320 O << getInstructionIndex(mii) << '\t' << *mii;
325 /// conflictsWithPhysRegDef - Returns true if the specified register
326 /// is defined at any point during the specified interval.
327 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
328 VirtRegMap &vrm, unsigned reg) {
329 for (LiveInterval::Ranges::const_iterator
330 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
331 for (unsigned index = getBaseIndex(I->start),
332 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
333 index += InstrSlots::NUM) {
334 // skip deleted instructions
335 while (index != end && !getInstructionFromIndex(index))
336 index += InstrSlots::NUM;
337 if (index == end) break;
339 MachineInstr *MI = getInstructionFromIndex(index);
340 unsigned SrcReg, DstReg;
341 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
342 if (SrcReg == li.reg || DstReg == li.reg)
344 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
345 MachineOperand& mop = MI->getOperand(i);
346 if (!mop.isRegister())
348 unsigned PhysReg = mop.getReg();
349 if (PhysReg == 0 || PhysReg == li.reg)
351 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
352 if (!vrm.hasPhys(PhysReg))
354 PhysReg = vrm.getPhys(PhysReg);
356 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
365 void LiveIntervals::printRegName(unsigned reg) const {
366 if (TargetRegisterInfo::isPhysicalRegister(reg))
367 cerr << tri_->getName(reg);
369 cerr << "%reg" << reg;
372 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
373 MachineBasicBlock::iterator mi,
374 unsigned MIIdx, MachineOperand& MO,
376 LiveInterval &interval) {
377 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
378 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
380 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
381 DOUT << "is a implicit_def\n";
385 // Virtual registers may be defined multiple times (due to phi
386 // elimination and 2-addr elimination). Much of what we do only has to be
387 // done once for the vreg. We use an empty interval to detect the first
388 // time we see a vreg.
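// Roughly two cases follow: the first definition of the vreg builds ranges
// from the def to its kills using the LiveVariables info, while any later
// definition must come from two-address rewriting or PHI elimination and is
// patched into the existing interval as a separate value number.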
389 if (interval.empty()) {
390 // Get the index of the defining instruction.
391 unsigned defIndex = getDefIndex(MIIdx);
393 MachineInstr *CopyMI = NULL;
394 unsigned SrcReg, DstReg;
395 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
396 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
397 tii_->isMoveInstr(*mi, SrcReg, DstReg))
399 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
401 assert(ValNo->id == 0 && "First value in interval is not 0?");
403 // Loop over all of the blocks that the vreg is defined in. There are
404 // two cases we have to handle here. The most common case is a vreg
405 // whose lifetime is contained within a basic block. In this case there
406 // will be a single kill, in MBB, which comes after the definition.
407 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
408 // FIXME: what about dead vars?
410 if (vi.Kills[0] != mi)
411 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
413 killIdx = defIndex+1;
415 // If the kill happens after the definition, we have an intra-block live range.
417 if (killIdx > defIndex) {
418 assert(vi.AliveBlocks.none() &&
419 "Shouldn't be alive across any blocks!");
420 LiveRange LR(defIndex, killIdx, ValNo);
421 interval.addRange(LR);
422 DOUT << " +" << LR << "\n";
423 interval.addKill(ValNo, killIdx);
428 // The other case we handle is when a virtual register lives to the end
429 // of the defining block, potentially live across some blocks, then is
430 // live into some number of blocks, but gets killed. Start by adding a
431 // range that goes from this definition to the end of the defining block.
432 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
433 DOUT << " +" << NewLR;
434 interval.addRange(NewLR);
436 // Iterate over all of the blocks that the variable is completely
437 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
439 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
440 if (vi.AliveBlocks[i]) {
441 LiveRange LR(getMBBStartIdx(i),
442 getMBBEndIdx(i)+1, // MBB ends at -1.
444 interval.addRange(LR);
449 // Finally, this virtual register is live from the start of any killing
450 // block to the 'use' slot of the killing instruction.
451 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
452 MachineInstr *Kill = vi.Kills[i];
453 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
454 LiveRange LR(getMBBStartIdx(Kill->getParent()),
456 interval.addRange(LR);
457 interval.addKill(ValNo, killIdx);
462 // If this is the second time we see a virtual register definition, it
463 // must be due to phi elimination or two addr elimination. If this is
464 // the result of two address elimination, then the vreg is one of the
465 // def-and-use register operands.
466 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
467 // If this is a two-address definition, then we have already processed
468 // the live range. The only problem is that we didn't realize there
469 // are actually two values in the live interval. Because of this we
470 // need to take the LiveRegion that defines this register and split it into two values.
472 assert(interval.containsOneValue());
473 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
474 unsigned RedefIndex = getDefIndex(MIIdx);
476 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
477 VNInfo *OldValNo = OldLR->valno;
479 // Delete the initial value, which should be short and continuous,
480 // because the 2-addr copy must be in the same MBB as the redef.
481 interval.removeRange(DefIndex, RedefIndex);
483 // Two-address vregs should always only be redefined once. This means
484 // that at this point, there should be exactly one value number in it.
485 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
487 // The new value number (#1) is defined by the instruction we claimed defined value #0.
489 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
492 // Value#0 is now defined by the 2-addr instruction.
493 OldValNo->def = RedefIndex;
496 // Add the new live interval which replaces the range for the input copy.
497 LiveRange LR(DefIndex, RedefIndex, ValNo);
498 DOUT << " replace range with " << LR;
499 interval.addRange(LR);
500 interval.addKill(ValNo, RedefIndex);
502 // If this redefinition is dead, we need to add a dummy unit live
503 // range covering the def slot.
505 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
508 interval.print(DOUT, tri_);
511 // Otherwise, this must be because of phi elimination. If this is the
512 // first redefinition of the vreg that we have seen, go back and change
513 // the live range in the PHI block to be a different value number.
514 if (interval.containsOneValue()) {
515 assert(vi.Kills.size() == 1 &&
516 "PHI elimination vreg should have one kill, the PHI itself!");
518 // Remove the old range that we now know has an incorrect number.
519 VNInfo *VNI = interval.getValNumInfo(0);
520 MachineInstr *Killer = vi.Kills[0];
521 unsigned Start = getMBBStartIdx(Killer->getParent());
522 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
523 DOUT << " Removing [" << Start << "," << End << "] from: ";
524 interval.print(DOUT, tri_); DOUT << "\n";
525 interval.removeRange(Start, End);
526 VNI->hasPHIKill = true;
527 DOUT << " RESULT: "; interval.print(DOUT, tri_);
529 // Replace the interval with one of a NEW value number. Note that this
530 // value number isn't actually defined by an instruction, weird huh? :)
531 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
532 DOUT << " replace range with " << LR;
533 interval.addRange(LR);
534 interval.addKill(LR.valno, End);
535 DOUT << " RESULT: "; interval.print(DOUT, tri_);
538 // In the case of PHI elimination, each variable definition is only
539 // live until the end of the block. We've already taken care of the
540 // rest of the live range.
541 unsigned defIndex = getDefIndex(MIIdx);
544 MachineInstr *CopyMI = NULL;
545 unsigned SrcReg, DstReg;
546 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
547 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
548 tii_->isMoveInstr(*mi, SrcReg, DstReg))
550 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
552 unsigned killIndex = getMBBEndIdx(mbb) + 1;
553 LiveRange LR(defIndex, killIndex, ValNo);
554 interval.addRange(LR);
555 interval.addKill(ValNo, killIndex);
556 ValNo->hasPHIKill = true;
564 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
565 MachineBasicBlock::iterator mi,
568 LiveInterval &interval,
569 MachineInstr *CopyMI) {
570 // A physical register cannot be live across basic blocks, so its
571 // lifetime must end somewhere in its defining basic block.
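// The scan below walks forward from the def looking for a kill or a
// redefinition; if neither is found before the end of the block, the value is
// treated as dead at its def (live-in physregs are handled separately in
// handleLiveInRegister).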
572 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
574 unsigned baseIndex = MIIdx;
575 unsigned start = getDefIndex(baseIndex);
576 unsigned end = start;
578 // If it is not used after definition, it is considered dead at
579 // the instruction defining it. Hence its interval is:
580 // [defSlot(def), defSlot(def)+1)
583 end = getDefIndex(start) + 1;
587 // If it is not dead on definition, it must be killed by a
588 // subsequent instruction. Hence its interval is:
589 // [defSlot(def), useSlot(kill)+1)
590 baseIndex += InstrSlots::NUM;
591 while (++mi != MBB->end()) {
592 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
593 getInstructionFromIndex(baseIndex) == 0)
594 baseIndex += InstrSlots::NUM;
595 if (mi->killsRegister(interval.reg, tri_)) {
597 end = getUseIndex(baseIndex) + 1;
599 } else if (mi->modifiesRegister(interval.reg, tri_)) {
600 // Another instruction redefines the register before it is ever read.
601 // Then the register is essentially dead at the instruction that defines
602 // it. Hence its interval is:
603 // [defSlot(def), defSlot(def)+1)
605 end = getDefIndex(start) + 1;
609 baseIndex += InstrSlots::NUM;
612 // The only case we should have a dead physreg here without a killing or
613 // other instruction where we know it's dead is if it is live-in to the function
615 assert(!CopyMI && "physreg was not killed in defining block!");
616 end = getDefIndex(start) + 1; // It's dead.
619 assert(start < end && "did not find end of interval?");
621 // Already exists? Extend old live interval.
622 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
623 VNInfo *ValNo = (OldLR != interval.end())
624 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
625 LiveRange LR(start, end, ValNo);
626 interval.addRange(LR);
627 interval.addKill(LR.valno, end);
628 DOUT << " +" << LR << '\n';
631 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
632 MachineBasicBlock::iterator MI,
636 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
637 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
638 getOrCreateInterval(MO.getReg()));
639 else if (allocatableRegs_[MO.getReg()]) {
640 MachineInstr *CopyMI = NULL;
641 unsigned SrcReg, DstReg;
642 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
643 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
644 tii_->isMoveInstr(*MI, SrcReg, DstReg))
646 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
647 getOrCreateInterval(MO.getReg()), CopyMI);
648 // Def of a register also defines its sub-registers.
649 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
650 // If MI also modifies the sub-register explicitly, avoid processing it
651 // more than once. Do not pass in TRI here so it checks for exact match.
652 if (!MI->modifiesRegister(*AS))
653 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
654 getOrCreateInterval(*AS), 0);
658 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
660 LiveInterval &interval, bool isAlias) {
661 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
663 // Look for kills; if the register reaches a def before it's killed, then it
664 // shouldn't be considered a live-in.
665 MachineBasicBlock::iterator mi = MBB->begin();
666 unsigned baseIndex = MIIdx;
667 unsigned start = baseIndex;
668 unsigned end = start;
669 while (mi != MBB->end()) {
670 if (mi->killsRegister(interval.reg, tri_)) {
672 end = getUseIndex(baseIndex) + 1;
674 } else if (mi->modifiesRegister(interval.reg, tri_)) {
675 // Another instruction redefines the register before it is ever read.
676 // Then the register is essentially dead at the instruction that defines
677 // it. Hence its interval is:
678 // [defSlot(def), defSlot(def)+1)
680 end = getDefIndex(start) + 1;
684 baseIndex += InstrSlots::NUM;
685 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
686 getInstructionFromIndex(baseIndex) == 0)
687 baseIndex += InstrSlots::NUM;
692 // Live-in register might not be used at all.
696 end = getDefIndex(MIIdx) + 1;
698 DOUT << " live through";
703 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
704 interval.addRange(LR);
705 interval.addKill(LR.valno, end);
706 DOUT << " +" << LR << '\n';
709 /// computeIntervals - Computes the live intervals for virtual
710 /// registers. For some ordering of the machine instructions [1,N], a
711 /// live interval is an interval [i, j) where 1 <= i <= j < N for
712 /// which a variable is live.
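/// For example (a sketch assuming the four-slot layout noted above): a vreg
/// defined by the instruction at base index 12 and last used by the
/// instruction at base index 20 gets the range [14, 22), i.e. from the def
/// slot of the defining instruction to just past the use slot of the kill.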
713 void LiveIntervals::computeIntervals() {
714 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
715 << "********** Function: "
716 << ((Value*)mf_->getFunction())->getName() << '\n';
717 // Track the index of the current machine instr.
718 unsigned MIIndex = 0;
720 // Skip over empty initial indices.
721 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
722 getInstructionFromIndex(MIIndex) == 0)
723 MIIndex += InstrSlots::NUM;
725 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
727 MachineBasicBlock *MBB = MBBI;
728 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
730 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
732 // Create intervals for live-ins to this BB first.
733 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
734 LE = MBB->livein_end(); LI != LE; ++LI) {
735 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
736 // Multiple live-ins can alias the same register.
737 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
738 if (!hasInterval(*AS))
739 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
743 for (; MI != miEnd; ++MI) {
744 DOUT << MIIndex << "\t" << *MI;
747 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
748 MachineOperand &MO = MI->getOperand(i);
749 // handle register defs - build intervals
750 if (MO.isRegister() && MO.getReg() && MO.isDef())
751 handleRegisterDef(MBB, MI, MIIndex, MO, i);
754 MIIndex += InstrSlots::NUM;
756 // Skip over empty indices.
757 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
758 getInstructionFromIndex(MIIndex) == 0)
759 MIIndex += InstrSlots::NUM;
764 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
765 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
766 std::vector<IdxMBBPair>::const_iterator I =
767 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
770 while (I != Idx2MBBMap.end()) {
771 if (LR.end <= I->first)
773 MBBs.push_back(I->second);
781 LiveInterval LiveIntervals::createInterval(unsigned reg) {
782 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
784 return LiveInterval(reg, Weight);
787 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
788 /// copy field and returns the source register that defines it.
789 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
793 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
794 return VNI->copy->getOperand(1).getReg();
795 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
796 return VNI->copy->getOperand(2).getReg();
797 unsigned SrcReg, DstReg;
798 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
800 assert(0 && "Unrecognized copy instruction!");
804 //===----------------------------------------------------------------------===//
805 // Register allocator hooks.
808 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
809 /// allow one) virtual register operand, then its uses are implicitly using
810 /// the register. Returns the virtual register.
811 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
812 MachineInstr *MI) const {
814 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
815 MachineOperand &MO = MI->getOperand(i);
816 if (!MO.isRegister() || !MO.isUse())
818 unsigned Reg = MO.getReg();
819 if (Reg == 0 || Reg == li.reg)
821 // FIXME: For now, only remat MI with at most one register operand.
823 "Can't rematerialize instruction with multiple register operand!");
832 /// isValNoAvailableAt - Return true if the val# of the specified interval
833 /// which reaches the given instruction also reaches the specified use index.
834 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
835 unsigned UseIdx) const {
836 unsigned Index = getInstructionIndex(MI);
837 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
838 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
839 return UI != li.end() && UI->valno == ValNo;
842 /// isReMaterializable - Returns true if the definition MI of the specified
843 /// val# of the specified interval is re-materializable.
844 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
845 const VNInfo *ValNo, MachineInstr *MI,
850 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
854 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
855 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
856 // FIXME: Let the target-specific isReallyTriviallyReMaterializable determine
857 // this, but remember this is not safe to fold into a two-address instruction.
859 // This is a load from a fixed stack slot. It can be rematerialized.
862 // If the target-specific rules don't identify an instruction as
863 // being trivially rematerializable, use some target-independent
865 if (!MI->getDesc().isRematerializable() ||
866 !tii_->isTriviallyReMaterializable(MI)) {
867 if (!EnableAggressiveRemat)
870 // If the instruction accesses memory but the memoperands have been lost,
871 // we can't analyze it.
872 const TargetInstrDesc &TID = MI->getDesc();
873 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
876 // Avoid instructions obviously unsafe for remat.
877 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
880 // If the instruction accesses memory and the memory could be non-constant,
881 // assume the instruction is not rematerializable.
882 for (alist<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
883 E = MI->memoperands_end(); I != E; ++I) {
884 const MachineMemOperand &MMO = *I;
885 if (MMO.isVolatile() || MMO.isStore())
887 const Value *V = MMO.getValue();
890 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
891 if (!PSV->isConstant(mf_->getFrameInfo()))
893 } else if (!aa_->pointsToConstantMemory(V))
897 // If any of the registers accessed are non-constant, conservatively assume
898 // the instruction is not rematerializable.
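// "Constant-valued" is approximated below as having a single reaching
// definition: either the register is a function live-in with no defs, or it
// has exactly one def and is not also live-in. At most one other register use
// is allowed (the implicit-use case handled by getReMatImplicitUse).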
900 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
901 const MachineOperand &MO = MI->getOperand(i);
903 unsigned Reg = MO.getReg();
906 if (TargetRegisterInfo::isPhysicalRegister(Reg))
909 // Only allow one def, and that in the first operand.
910 if (MO.isDef() != (i == 0))
913 // Only allow constant-valued registers.
914 bool IsLiveIn = mri_->isLiveIn(Reg);
915 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
918 // For the def, it should be the only def.
919 if (MO.isDef() && (next(I) != E || IsLiveIn))
923 // Only allow one other register use, as that's all the
924 // remat mechanisms currently support.
928 else if (Reg != ImpUse)
931 // For uses, there should be only one associated def.
932 if (I != E && (next(I) != E || IsLiveIn))
939 unsigned ImpUse = getReMatImplicitUse(li, MI);
941 const LiveInterval &ImpLi = getInterval(ImpUse);
942 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
943 re = mri_->use_end(); ri != re; ++ri) {
944 MachineInstr *UseMI = &*ri;
945 unsigned UseIdx = getInstructionIndex(UseMI);
946 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
948 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
955 /// isReMaterializable - Returns true if the defining MI of every
956 /// val# of the specified interval is re-materializable.
957 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
959 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
961 const VNInfo *VNI = *i;
962 unsigned DefIdx = VNI->def;
964 continue; // Dead val#.
965 // Is the def for the val# rematerializable?
968 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
969 bool DefIsLoad = false;
971 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
978 /// FilterFoldedOps - Filter out two-address use operands. Returns
979 /// true if it finds any issue with the operands that ought to prevent folding.
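/// MRInfo accumulates VirtRegMap::isRef / isMod bits describing whether the
/// operands being folded read and/or write the register; callers use these
/// bits to decide whether folding into a two-address instruction is legal.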
981 static bool FilterFoldedOps(MachineInstr *MI,
982 SmallVector<unsigned, 2> &Ops,
984 SmallVector<unsigned, 2> &FoldOps) {
985 const TargetInstrDesc &TID = MI->getDesc();
988 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
989 unsigned OpIdx = Ops[i];
990 MachineOperand &MO = MI->getOperand(OpIdx);
991 // FIXME: fold subreg use.
995 MRInfo |= (unsigned)VirtRegMap::isMod;
997 // Filter out two-address use operand(s).
998 if (!MO.isImplicit() &&
999 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1000 MRInfo = VirtRegMap::isModRef;
1003 MRInfo |= (unsigned)VirtRegMap::isRef;
1005 FoldOps.push_back(OpIdx);
1011 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1012 /// slot / to reg or any rematerialized load into the ith operand of the
1013 /// specified MI. If it is successful, MI is updated with the newly created MI and the function returns true.
1015 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1016 VirtRegMap &vrm, MachineInstr *DefMI,
1018 SmallVector<unsigned, 2> &Ops,
1019 bool isSS, int Slot, unsigned Reg) {
1020 // If it is an implicit def instruction, just delete it.
1021 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1022 RemoveMachineInstrFromMaps(MI);
1023 vrm.RemoveMachineInstrFromMaps(MI);
1024 MI->eraseFromParent();
1029 // Filter the list of operand indexes that are to be folded. Abort if
1030 // any operand will prevent folding.
1031 unsigned MRInfo = 0;
1032 SmallVector<unsigned, 2> FoldOps;
1033 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1036 // The only time it's safe to fold into a two-address instruction is when
1037 // we are folding a reload or spill from / into a spill stack slot.
1038 if (DefMI && (MRInfo & VirtRegMap::isMod))
1041 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1042 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1044 // Remember this instruction uses the spill slot.
1045 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1047 // Attempt to fold the memory reference into the instruction. If
1048 // we can do this, we don't need to insert spill code.
1049 MachineBasicBlock &MBB = *MI->getParent();
1050 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1051 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1052 vrm.transferSpillPts(MI, fmi);
1053 vrm.transferRestorePts(MI, fmi);
1054 vrm.transferEmergencySpills(MI, fmi);
1056 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1057 mi2iMap_[fmi] = InstrIdx;
1058 MI = MBB.insert(MBB.erase(MI), fmi);
1065 /// canFoldMemoryOperand - Returns true if the specified load / store
1066 /// folding is possible.
1067 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1068 SmallVector<unsigned, 2> &Ops,
1070 // Filter the list of operand indexes that are to be folded. Abort if
1071 // any operand will prevent folding.
1072 unsigned MRInfo = 0;
1073 SmallVector<unsigned, 2> FoldOps;
1074 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1077 // It's only legal to remat for a use, not a def.
1078 if (ReMat && (MRInfo & VirtRegMap::isMod))
1081 return tii_->canFoldMemoryOperand(MI, FoldOps);
1084 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1085 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1086 for (LiveInterval::Ranges::const_iterator
1087 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1088 std::vector<IdxMBBPair>::const_iterator II =
1089 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1090 if (II == Idx2MBBMap.end())
1092 if (I->end > II->first) // crossing a MBB.
1094 MBBs.insert(II->second);
1095 if (MBBs.size() > 1)
1101 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1102 /// interval on to-be re-materialized operands of MI) with new register.
1103 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1104 MachineInstr *MI, unsigned NewVReg,
1106 // There is an implicit use. That means one of the other operands is
1107 // being remat'ed and the remat'ed instruction has li.reg as a
1108 // use operand. Make sure we rewrite that as well.
1109 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1110 MachineOperand &MO = MI->getOperand(i);
1111 if (!MO.isRegister())
1113 unsigned Reg = MO.getReg();
1114 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1116 if (!vrm.isReMaterialized(Reg))
1118 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1119 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1121 UseMO->setReg(NewVReg);
1125 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1126 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1127 bool LiveIntervals::
1128 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1129 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1130 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1131 unsigned Slot, int LdSlot,
1132 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1134 const TargetRegisterClass* rc,
1135 SmallVector<int, 4> &ReMatIds,
1136 const MachineLoopInfo *loopInfo,
1137 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1138 std::map<unsigned,unsigned> &MBBVRegsMap,
1139 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1140 MachineBasicBlock *MBB = MI->getParent();
1141 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1142 bool CanFold = false;
1144 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1145 MachineOperand& mop = MI->getOperand(i);
1146 if (!mop.isRegister())
1148 unsigned Reg = mop.getReg();
1149 unsigned RegI = Reg;
1150 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1155 bool TryFold = !DefIsReMat;
1156 bool FoldSS = true; // Default behavior unless it's a remat.
1157 int FoldSlot = Slot;
1159 // If this is the rematerializable definition MI itself and
1160 // all of its uses are rematerialized, simply delete it.
1161 if (MI == ReMatOrigDefMI && CanDelete) {
1162 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1164 RemoveMachineInstrFromMaps(MI);
1165 vrm.RemoveMachineInstrFromMaps(MI);
1166 MI->eraseFromParent();
1170 // If def for this use can't be rematerialized, then try folding.
1171 // If def is rematerializable and it's a load, also try folding.
1172 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1174 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1180 // Scan all of the operands of this instruction rewriting operands
1181 // to use NewVReg instead of li.reg as appropriate. We do this for
1184 // 1. If the instr reads the same spilled vreg multiple times, we
1185 // want to reuse the NewVReg.
1186 // 2. If the instr is a two-addr instruction, we are required to
1187 // keep the src/dst regs pinned.
1189 // Keep track of whether we replace a use and/or def so that we can
1190 // create the spill interval with the appropriate range.
1192 HasUse = mop.isUse();
1193 HasDef = mop.isDef();
1194 SmallVector<unsigned, 2> Ops;
1196 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1197 const MachineOperand &MOj = MI->getOperand(j);
1198 if (!MOj.isRegister())
1200 unsigned RegJ = MOj.getReg();
1201 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1205 HasUse |= MOj.isUse();
1206 HasDef |= MOj.isDef();
1210 if (HasUse && !li.liveAt(getUseIndex(index)))
1211 // Must be defined by an implicit def. It should not be spilled. Note,
1212 // this is for correctness reasons. e.g.
1213 // 8 %reg1024<def> = IMPLICIT_DEF
1214 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1215 // The live range [12, 14) is not part of the r1024 live interval since
1216 // it's defined by an implicit def. It will not conflict with the live
1217 // interval of r1025. Now suppose both registers are spilled; you can
1218 // easily see a situation where both registers are reloaded before
1219 // the INSERT_SUBREG and the two target registers would overlap.
1222 // Update stack slot spill weight if we are splitting.
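// (getSpillWeight is defined in LiveIntervalAnalysis.h; it is roughly
// (isDef + isUse) * 10^loopDepth, so defs/uses in deeper loops weigh more.)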
1223 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1230 // Do not fold load / store here if we are splitting. We'll find an
1231 // optimal point to insert a load / store later.
1233 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1234 Ops, FoldSS, FoldSlot, Reg)) {
1235 // Folding the load/store can completely change the instruction in
1236 // unpredictable ways, rescan it from the beginning.
1240 if (isRemoved(MI)) {
1244 goto RestartInstruction;
1247 // We'll try to fold it later if it's profitable.
1248 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1252 // Create a new virtual register for the spill interval.
1253 bool CreatedNewVReg = false;
1255 NewVReg = mri_->createVirtualRegister(rc);
1257 CreatedNewVReg = true;
1259 mop.setReg(NewVReg);
1260 if (mop.isImplicit())
1261 rewriteImplicitOps(li, MI, NewVReg, vrm);
1263 // Reuse NewVReg for other reads.
1264 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1265 MachineOperand &mopj = MI->getOperand(Ops[j]);
1266 mopj.setReg(NewVReg);
1267 if (mopj.isImplicit())
1268 rewriteImplicitOps(li, MI, NewVReg, vrm);
1271 if (CreatedNewVReg) {
1273 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1274 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1275 // Each valnum may have its own remat id.
1276 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1278 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1280 if (!CanDelete || (HasUse && HasDef)) {
1281 // If this is a two-addr instruction then its use operands are
1282 // rematerializable but its def is not. It should be assigned a stack slot.
1284 vrm.assignVirt2StackSlot(NewVReg, Slot);
1287 vrm.assignVirt2StackSlot(NewVReg, Slot);
1289 } else if (HasUse && HasDef &&
1290 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1291 // If this interval hasn't been assigned a stack slot (because earlier
1292 // def is a deleted remat def), do it now.
1293 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1294 vrm.assignVirt2StackSlot(NewVReg, Slot);
1297 // Re-matting an instruction with virtual register use. Add the
1298 // register as an implicit use on the use MI.
1299 if (DefIsReMat && ImpUse)
1300 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1302 // create a new register interval for this spill / remat.
1303 LiveInterval &nI = getOrCreateInterval(NewVReg);
1304 if (CreatedNewVReg) {
1305 NewLIs.push_back(&nI);
1306 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1308 vrm.setIsSplitFromReg(NewVReg, li.reg);
1312 if (CreatedNewVReg) {
1313 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1314 nI.getNextValue(~0U, 0, VNInfoAllocator));
1318 // Extend the split live interval to this def / use.
1319 unsigned End = getUseIndex(index)+1;
1320 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1321 nI.getValNumInfo(nI.getNumValNums()-1));
1327 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1328 nI.getNextValue(~0U, 0, VNInfoAllocator));
1333 DOUT << "\t\t\t\tAdded new interval: ";
1334 nI.print(DOUT, tri_);
1339 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1341 MachineBasicBlock *MBB, unsigned Idx) const {
1342 unsigned End = getMBBEndIdx(MBB);
1343 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1344 unsigned KillIdx = VNI->kills[j];
1345 if (KillIdx > Idx && KillIdx < End)
1351 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1352 /// during spilling.
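/// Entries are later sorted by instruction index (RewriteInfoCompare) so that
/// all operands of the same MachineInstr are processed together in
/// rewriteInstructionsForSpills.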
1354 struct RewriteInfo {
1359 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1360 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1363 struct RewriteInfoCompare {
1364 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1365 return LHS.Index < RHS.Index;
1370 void LiveIntervals::
1371 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1372 LiveInterval::Ranges::const_iterator &I,
1373 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1374 unsigned Slot, int LdSlot,
1375 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1377 const TargetRegisterClass* rc,
1378 SmallVector<int, 4> &ReMatIds,
1379 const MachineLoopInfo *loopInfo,
1380 BitVector &SpillMBBs,
1381 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1382 BitVector &RestoreMBBs,
1383 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1384 std::map<unsigned,unsigned> &MBBVRegsMap,
1385 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1386 bool AllCanFold = true;
1387 unsigned NewVReg = 0;
1388 unsigned start = getBaseIndex(I->start);
1389 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1391 // First collect all the def / use in this live range that will be rewritten.
1392 // Make sure they are sorted according to instruction index.
1393 std::vector<RewriteInfo> RewriteMIs;
1394 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1395 re = mri_->reg_end(); ri != re; ) {
1396 MachineInstr *MI = &*ri;
1397 MachineOperand &O = ri.getOperand();
1399 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1400 unsigned index = getInstructionIndex(MI);
1401 if (index < start || index >= end)
1403 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1404 // Must be defined by an implicit def. It should not be spilled. Note,
1405 // this is for correctness reasons. e.g.
1406 // 8 %reg1024<def> = IMPLICIT_DEF
1407 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1408 // The live range [12, 14) is not part of the r1024 live interval since
1409 // it's defined by an implicit def. It will not conflict with the live
1410 // interval of r1025. Now suppose both registers are spilled; you can
1411 // easily see a situation where both registers are reloaded before
1412 // the INSERT_SUBREG and the two target registers would overlap.
1414 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1416 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1418 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1419 // Now rewrite the defs and uses.
1420 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1421 RewriteInfo &rwi = RewriteMIs[i];
1423 unsigned index = rwi.Index;
1424 bool MIHasUse = rwi.HasUse;
1425 bool MIHasDef = rwi.HasDef;
1426 MachineInstr *MI = rwi.MI;
1427 // If MI defines and/or uses the same register multiple times, then there
1428 // are multiple entries.
1429 unsigned NumUses = MIHasUse;
1430 while (i != e && RewriteMIs[i].MI == MI) {
1431 assert(RewriteMIs[i].Index == index);
1432 bool isUse = RewriteMIs[i].HasUse;
1433 if (isUse) ++NumUses;
1435 MIHasDef |= RewriteMIs[i].HasDef;
1438 MachineBasicBlock *MBB = MI->getParent();
1440 if (ImpUse && MI != ReMatDefMI) {
1441 // Re-matting an instruction with a virtual register use. Update the
1442 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1444 LiveInterval &ImpLi = getInterval(ImpUse);
1445 ImpLi.weight = HUGE_VALF;
1448 unsigned MBBId = MBB->getNumber();
1449 unsigned ThisVReg = 0;
1451 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1452 if (NVI != MBBVRegsMap.end()) {
1453 ThisVReg = NVI->second;
1460 // It's better to start a new interval to avoid artificially
1461 // extending the new interval.
1462 if (MIHasDef && !MIHasUse) {
1463 MBBVRegsMap.erase(MBB->getNumber());
1469 bool IsNew = ThisVReg == 0;
1471 // This ends the previous live interval. If all of its def / use
1472 // can be folded, give it a low spill weight.
1473 if (NewVReg && TrySplit && AllCanFold) {
1474 LiveInterval &nI = getOrCreateInterval(NewVReg);
1481 bool HasDef = false;
1482 bool HasUse = false;
1483 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1484 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1485 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1486 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1487 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1488 if (!HasDef && !HasUse)
1491 AllCanFold &= CanFold;
1493 // Update weight of spill interval.
1494 LiveInterval &nI = getOrCreateInterval(NewVReg);
1496 // The spill weight is now infinity as it cannot be spilled again.
1497 nI.weight = HUGE_VALF;
1501 // Keep track of the last def and first use in each MBB.
1503 if (MI != ReMatOrigDefMI || !CanDelete) {
1504 bool HasKill = false;
1506 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1508 // If this is two-address code, then this index starts a new VNInfo.
1509 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1511 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1513 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1514 SpillIdxes.find(MBBId);
1516 if (SII == SpillIdxes.end()) {
1517 std::vector<SRInfo> S;
1518 S.push_back(SRInfo(index, NewVReg, true));
1519 SpillIdxes.insert(std::make_pair(MBBId, S));
1520 } else if (SII->second.back().vreg != NewVReg) {
1521 SII->second.push_back(SRInfo(index, NewVReg, true));
1522 } else if ((int)index > SII->second.back().index) {
1523 // If there is an earlier def and this is a two-address
1524 // instruction, then it's not possible to fold the store (which
1525 // would also fold the load).
1526 SRInfo &Info = SII->second.back();
1528 Info.canFold = !HasUse;
1530 SpillMBBs.set(MBBId);
1531 } else if (SII != SpillIdxes.end() &&
1532 SII->second.back().vreg == NewVReg &&
1533 (int)index > SII->second.back().index) {
1534 // There is an earlier def that's not killed (must be two-address).
1535 // The spill is no longer needed.
1536 SII->second.pop_back();
1537 if (SII->second.empty()) {
1538 SpillIdxes.erase(MBBId);
1539 SpillMBBs.reset(MBBId);
1546 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1547 SpillIdxes.find(MBBId);
1548 if (SII != SpillIdxes.end() &&
1549 SII->second.back().vreg == NewVReg &&
1550 (int)index > SII->second.back().index)
1551 // There are use(s) following the last def; it's not safe to fold the spill.
1552 SII->second.back().canFold = false;
1553 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1554 RestoreIdxes.find(MBBId);
1555 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1556 // If we are splitting live intervals, only fold if it's the first
1557 // use and there isn't another use later in the MBB.
1558 RII->second.back().canFold = false;
1560 // Only need a reload if there isn't an earlier def / use.
1561 if (RII == RestoreIdxes.end()) {
1562 std::vector<SRInfo> Infos;
1563 Infos.push_back(SRInfo(index, NewVReg, true));
1564 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1566 RII->second.push_back(SRInfo(index, NewVReg, true));
1568 RestoreMBBs.set(MBBId);
1572 // Update spill weight.
1573 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1574 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1577 if (NewVReg && TrySplit && AllCanFold) {
1578 // If all of its def / use can be folded, give it a low spill weight.
1579 LiveInterval &nI = getOrCreateInterval(NewVReg);
1584 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1585 BitVector &RestoreMBBs,
1586 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1587 if (!RestoreMBBs[Id])
1589 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1590 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1591 if (Restores[i].index == index &&
1592 Restores[i].vreg == vr &&
1593 Restores[i].canFold)
1598 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1599 BitVector &RestoreMBBs,
1600 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1601 if (!RestoreMBBs[Id])
1603 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1604 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1605 if (Restores[i].index == index && Restores[i].vreg)
1606 Restores[i].index = -1;
1609 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1610 /// spilled and create empty intervals for their uses.
1612 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1613 const TargetRegisterClass* rc,
1614 std::vector<LiveInterval*> &NewLIs) {
1615 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1616 re = mri_->reg_end(); ri != re; ) {
1617 MachineOperand &O = ri.getOperand();
1618 MachineInstr *MI = &*ri;
1621 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1622 "Register def was not rewritten?");
1623 RemoveMachineInstrFromMaps(MI);
1624 vrm.RemoveMachineInstrFromMaps(MI);
1625 MI->eraseFromParent();
1627 // This must be a use of an implicit_def so it's not part of the live
1628 // interval. Create a new empty live interval for it.
1629 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1630 unsigned NewVReg = mri_->createVirtualRegister(rc);
1632 vrm.setIsImplicitlyDefined(NewVReg);
1633 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1634 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1635 MachineOperand &MO = MI->getOperand(i);
1636 if (MO.isReg() && MO.getReg() == li.reg)
1644 std::vector<LiveInterval*> LiveIntervals::
1645 addIntervalsForSpills(const LiveInterval &li,
1646 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1648 assert(li.weight != HUGE_VALF &&
1649 "attempt to spill already spilled interval!");
1651 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1652 li.print(DOUT, tri_);
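// A rough outline of what follows: (1) pre-split intervals are rewritten
// directly and returned; (2) each val# is checked for rematerializability and
// a stack slot is allocated if any use cannot be remat'ed; (3) every range is
// rewritten via rewriteInstructionsForSpills; (4) when splitting, the spill
// and restore points recorded in SpillIdxes / RestoreIdxes are folded into
// instructions where possible or handed to the spiller via the VirtRegMap.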
1655 // Spill slot weight.
1658 // Each bit specifies whether a spill is required in the corresponding MBB.
1659 BitVector SpillMBBs(mf_->getNumBlockIDs());
1660 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1661 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1662 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1663 std::map<unsigned,unsigned> MBBVRegsMap;
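// SpillIdxes / RestoreIdxes map an MBB number to the SRInfo records (index,
// new vreg, foldable flag) where a spill or reload may be needed in that
// block; MBBVRegsMap remembers the last new vreg created for a block so that
// several uses within one block can share it when splitting.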
1664 std::vector<LiveInterval*> NewLIs;
1665 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1667 unsigned NumValNums = li.getNumValNums();
1668 SmallVector<MachineInstr*, 4> ReMatDefs;
1669 ReMatDefs.resize(NumValNums, NULL);
1670 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1671 ReMatOrigDefs.resize(NumValNums, NULL);
1672 SmallVector<int, 4> ReMatIds;
1673 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1674 BitVector ReMatDelete(NumValNums);
1675 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1677 // Spilling a split live interval. It cannot be split any further. Also,
1678 // it's guaranteed to be a single val# / range interval.
1679 if (vrm.getPreSplitReg(li.reg)) {
1680 vrm.setIsSplitFromReg(li.reg, 0);
1681 // Unset the split kill marker on the last use.
1682 unsigned KillIdx = vrm.getKillPoint(li.reg);
1684 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1685 assert(KillMI && "Last use disappeared?");
1686 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1687 assert(KillOp != -1 && "Last use disappeared?");
1688 KillMI->getOperand(KillOp).setIsKill(false);
1690 vrm.removeKillPoint(li.reg);
1691 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1692 Slot = vrm.getStackSlot(li.reg);
1693 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1694 MachineInstr *ReMatDefMI = DefIsReMat ?
1695 vrm.getReMaterializedMI(li.reg) : NULL;
1697 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1698 bool isLoad = isLoadSS ||
1699 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1700 bool IsFirstRange = true;
1701 for (LiveInterval::Ranges::const_iterator
1702 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1703 // If this is a split live interval with multiple ranges, it means there
1704 // are two-address instructions that re-defined the value. Only the
1705 // first def can be rematerialized!
1707 // Note ReMatOrigDefMI has already been deleted.
1708 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1709 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1710 false, vrm, rc, ReMatIds, loopInfo,
1711 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1712 MBBVRegsMap, NewLIs, SSWeight);
1714 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1715 Slot, 0, false, false, false,
1716 false, vrm, rc, ReMatIds, loopInfo,
1717 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1718 MBBVRegsMap, NewLIs, SSWeight);
1720 IsFirstRange = false;
1723 SSWeight = 0.0f; // Already accounted for when split.
1724 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1728 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
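// Splitting is only attempted for intervals spanning more than one MBB, and
// the hidden -split-limit option can cap the total number of splits for
// debugging.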
1729 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1733 bool NeedStackSlot = false;
1734 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1736 const VNInfo *VNI = *i;
1737 unsigned VN = VNI->id;
1738 unsigned DefIdx = VNI->def;
1740 continue; // Dead val#.
1741 // Is the def for the val# rematerializable?
1742 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1743 ? 0 : getInstructionFromIndex(DefIdx);
1745 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1746 // Remember how to remat the def of this val#.
1747 ReMatOrigDefs[VN] = ReMatDefMI;
1748 // Original def may be modified so we have to make a copy here.
1749 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1750 ClonedMIs.push_back(Clone);
1751 ReMatDefs[VN] = Clone;
1753 bool CanDelete = true;
1754 if (VNI->hasPHIKill) {
1755 // One of this value's kills is a phi node, so not all of its uses can be
1756 // rematerialized. The def must not be deleted.
CanDelete = false;
1758 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1760 NeedStackSlot = true;
}
if (CanDelete)
1763 ReMatDelete.set(VN);
} else {
1765 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1767 NeedStackSlot = true;
}
}
1771 // One stack slot per live interval.
1772 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1773 Slot = vrm.assignVirt2StackSlot(li.reg);
1775 // Create new intervals and rewrite defs and uses.
1776 for (LiveInterval::Ranges::const_iterator
1777 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1778 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1779 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1780 bool DefIsReMat = ReMatDefMI != NULL;
1781 bool CanDelete = ReMatDelete[I->valno->id];
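// Determine whether the rematerialized def is itself a load (from a stack
// slot or a simple load); if so, the rewriting below can re-issue or fold
// that load at each use.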
int LdSlot = 0;
1783 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1784 bool isLoad = isLoadSS ||
1785 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1786 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1787 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1788 CanDelete, vrm, rc, ReMatIds, loopInfo,
1789 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1790 MBBVRegsMap, NewLIs, SSWeight);
}

1793 // Insert spills / restores if we are splitting.
if (!TrySplit) {
1795 handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
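// At this point SpillIdxes and RestoreIdxes record, per basic block, the
// points where the split register may need to be stored or reloaded. The
// two passes below try to fold each point into a nearby instruction and
// otherwise ask the spiller to insert an explicit spill / restore.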
1799 SmallPtrSet<LiveInterval*, 4> AddedKill;
1800 SmallVector<unsigned, 2> Ops;
1801 if (NeedStackSlot) {
1802 int Id = SpillMBBs.find_first();
while (Id != -1) {
1804 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1805 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1806 std::vector<SRInfo> &spills = SpillIdxes[Id];
1807 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1808 int index = spills[i].index;
1809 unsigned VReg = spills[i].vreg;
1810 LiveInterval &nI = getOrCreateInterval(VReg);
1811 bool isReMat = vrm.isReMaterialized(VReg);
1812 MachineInstr *MI = getInstructionFromIndex(index);
1813 bool CanFold = false;
1814 bool FoundUse = false;
Ops.clear();
1816 if (spills[i].canFold) {
CanFold = true;
1818 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1819 MachineOperand &MO = MI->getOperand(j);
1820 if (!MO.isRegister() || MO.getReg() != VReg)
continue;
Ops.push_back(j);
if (MO.isDef())
continue;
if (MO.isImplicit() ||
1827 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1828 RestoreMBBs, RestoreIdxes))) {
1829 // MI has two-address uses of the same register. If the use
1830 // isn't the first and only use in the BB, then we can't fold
1831 // it. FIXME: Move this to rewriteInstructionsForSpills.
CanFold = false;
break;
}
FoundUse = true;
}
}
1838 // Fold the store into the def if possible.
1839 bool Folded = false;
1840 if (CanFold && !Ops.empty()) {
1841 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
Folded = true;
if (FoundUse) {
1844 // Also folded uses, do not issue a load.
1845 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1846 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
}
1848 nI.removeRange(getDefIndex(index), getStoreIndex(index));
}
}

1852 // Otherwise tell the spiller to issue a spill.
if (!Folded) {
1854 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1855 bool isKill = LR->end == getStoreIndex(index);
1856 if (!MI->registerDefIsDead(nI.reg))
1857 // No need to spill a dead def.
1858 vrm.addSpillPoint(VReg, isKill, MI);
if (isKill)
1860 AddedKill.insert(&nI);
}
1863 // Update spill slot weight.
if (!isReMat)
1865 SSWeight += getSpillWeight(true, false, loopDepth);
}
1867 Id = SpillMBBs.find_next(Id);
}
}
1871 int Id = RestoreMBBs.find_first();
while (Id != -1) {
1873 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1874 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1876 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1877 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1878 int index = restores[i].index;
if (index == -1)
continue;
1881 unsigned VReg = restores[i].vreg;
1882 LiveInterval &nI = getOrCreateInterval(VReg);
1883 bool isReMat = vrm.isReMaterialized(VReg);
1884 MachineInstr *MI = getInstructionFromIndex(index);
1885 bool CanFold = false;
Ops.clear();
1887 if (restores[i].canFold) {
CanFold = true;
1889 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1890 MachineOperand &MO = MI->getOperand(j);
1891 if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isDef()) {
1895 // If this restore were to be folded, it would have been folded already.
CanFold = false;
break;
}
Ops.push_back(j);
}
}
1904 // Fold the load into the use if possible.
1905 bool Folded = false;
1906 if (CanFold && !Ops.empty()) {
if (!vrm.isReMaterialized(VReg))
1908 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
else {
1910 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
1912 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1913 // If the rematerializable def is a load, also try to fold it.
1914 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1915 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1916 Ops, isLoadSS, LdSlot, VReg);
1917 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
1919 // Re-matting an instruction with virtual register use. Add the
1920 // register as an implicit use on the use MI and update the register
1921 // interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
1923 LiveInterval &ImpLi = getInterval(ImpUse);
1924 ImpLi.weight = HUGE_VALF;
1925 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
}
}
}
1929 // If folding is not possible / failed, then tell the spiller to issue a
1930 // load / rematerialization for us.
if (Folded)
1932 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
else
1934 vrm.addRestorePoint(VReg, MI);
1936 // Update spill slot weight.
if (!isReMat)
1938 SSWeight += getSpillWeight(false, true, loopDepth);
}
1940 Id = RestoreMBBs.find_next(Id);
}
1943 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
1945 std::vector<LiveInterval*> RetNewLIs;
1946 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1947 LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
1949 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1950 if (!AddedKill.count(LI)) {
1951 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1952 unsigned LastUseIdx = getBaseIndex(LR->end);
1953 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1954 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1955 assert(UseIdx != -1);
1956 if (LastUse->getOperand(UseIdx).isImplicit() ||
1957 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1958 LastUse->getOperand(UseIdx).setIsKill();
1959 vrm.addKillPoint(LI->reg, LastUseIdx);
}
}
1962 RetNewLIs.push_back(LI);
}
}
1966 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
return RetNewLIs;
}
1970 /// hasAllocatableSuperReg - Return true if the specified physical register has
1971 /// any super register that's allocatable.
1972 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1973 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1974 if (allocatableRegs_[*AS] && hasInterval(*AS))
return true;
return false;
}
1979 /// getRepresentativeReg - Find the largest super register of the specified
1980 /// physical register.
1981 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1982 // Find the largest super-register that is allocatable.
1983 unsigned BestReg = Reg;
1984 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1985 unsigned SuperReg = *AS;
1986 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
BestReg = SuperReg;
break;
}
}
return BestReg;
}
1994 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1995 /// specified interval that conflict with the specified physical register.
1996 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1997 unsigned PhysReg) const {
1998 unsigned NumConflicts = 0;
1999 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2000 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2001 E = mri_->reg_end(); I != E; ++I) {
2002 MachineOperand &O = I.getOperand();
2003 MachineInstr *MI = O.getParent();
2004 unsigned Index = getInstructionIndex(MI);
2005 if (pli.liveAt(Index))
++NumConflicts;
}
2008 return NumConflicts;
2011 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2012 /// around all defs and uses of the specified interval.
2013 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2014 unsigned PhysReg, VirtRegMap &vrm) {
2015 unsigned SpillReg = getRepresentativeReg(PhysReg);
2017 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2018 // If there is a register that aliases PhysReg but is not a sub-register
2019 // of the chosen representative super register, assert, since we can't
2020 // handle that yet.
2021 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2022 tri_->isSuperRegister(*AS, SpillReg));
2024 LiveInterval &pli = getInterval(SpillReg);
2025 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2026 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2027 E = mri_->reg_end(); I != E; ++I) {
2028 MachineOperand &O = I.getOperand();
2029 MachineInstr *MI = O.getParent();
2030 if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
2033 unsigned Index = getInstructionIndex(MI);
2034 if (pli.liveAt(Index)) {
2035 vrm.addEmergencySpill(SpillReg, MI);
2036 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
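// Also clear any sub-register intervals that are live here so the entire
// representative register is free around this instruction.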
2037 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2038 if (!hasInterval(*AS))
continue;
2040 LiveInterval &spli = getInterval(*AS);
2041 if (spli.liveAt(Index))
2042 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
}
}
}
}
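/// addLiveRangeToEndOfBlock - Create a new value number for the given
/// register, defined by startInst, and extend its live interval from that
/// def to the end of the defining basic block.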
2048 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2049 MachineInstr* startInst) {
2050 LiveInterval& Interval = getOrCreateInterval(reg);
2051 VNInfo* VN = Interval.getNextValue(
2052 getInstructionIndex(startInst) + InstrSlots::DEF,
2053 startInst, getVNInfoAllocator());
2054 VN->hasPHIKill = true;
2055 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2056 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2057 getMBBEndIdx(startInst->getParent()) + 1, VN);
2058 Interval.addRange(LR);