1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden options to help debugging.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
50 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
52 static cl::opt<bool> EnableFastSpilling("fast-spill",
53 cl::init(false), cl::Hidden);
55 STATISTIC(numIntervals, "Number of original intervals");
56 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
70 AU.addPreservedID(PHIEliminationID);
71 AU.addRequiredID(PHIEliminationID);
72 AU.addRequiredID(TwoAddressInstructionPassID);
73 MachineFunctionPass::getAnalysisUsage(AU);
76 void LiveIntervals::releaseMemory() {
77 // Free the live intervals themselves.
78 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
79 E = r2iMap_.end(); I != E; ++I)
87 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
88 VNInfoAllocator.Reset();
89 while (!ClonedMIs.empty()) {
90 MachineInstr *MI = ClonedMIs.back();
92 mf_->DeleteMachineInstr(MI);
96 void LiveIntervals::computeNumbering() {
97 Index2MiMap OldI2MI = i2miMap_;
98 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
107 // Number MachineInstrs and MachineBasicBlocks.
108 // Initialize MBB indexes to a sentinel.
109 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
111 unsigned MIIndex = 0;
112 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
114 unsigned StartIdx = MIIndex;
116 // Insert an empty slot at the beginning of each block.
117 MIIndex += InstrSlots::NUM;
118 i2miMap_.push_back(0);
120 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
122 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
123 assert(inserted && "multiple MachineInstr -> index mappings");
124 i2miMap_.push_back(I);
125 MIIndex += InstrSlots::NUM;
128 // Insert an empty slot after every instruction.
129 MIIndex += InstrSlots::NUM;
130 i2miMap_.push_back(0);
133 // Set the MBB2IdxMap entry for this MBB.
134 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
135 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
137 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
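  // A rough sketch of the resulting numbering (this layout is an assumption
  // based on the InstrSlots enum declared in LiveIntervalAnalysis.h, where
  // LOAD, USE, DEF and STORE are sub-slots of one instruction index): with
  // InstrSlots::NUM == 4, an instruction whose base index is 8 exposes
  // 8 (LOAD), 9 (USE), 10 (DEF) and 11 (STORE).  The empty, unmapped slots
  // inserted above leave room between real instructions so that live ranges
  // referring to erased instructions can still be remapped sensibly below.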
139 if (!OldI2MI.empty())
140 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
141 for (LiveInterval::iterator LI = OI->second->begin(),
142 LE = OI->second->end(); LI != LE; ++LI) {
144 // Remap the start index of the live range to the corresponding new
145 // number, or our best guess at what it _should_ correspond to if the
146 // original instruction has been erased. This is either the following
147 // instruction or its predecessor.
148 unsigned index = LI->start / InstrSlots::NUM;
149 unsigned offset = LI->start % InstrSlots::NUM;
150 if (offset == InstrSlots::LOAD) {
151 std::vector<IdxMBBPair>::const_iterator I =
152 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
153 // Take the pair containing the index
154 std::vector<IdxMBBPair>::const_iterator J =
155 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
157 LI->start = getMBBStartIdx(J->second);
159 LI->start = mi2iMap_[OldI2MI[index]] + offset;
162 // Remap the ending index in the same way that we remapped the start,
163 // except for the final step where we always map to the immediately
164 // following instruction.
165 index = (LI->end - 1) / InstrSlots::NUM;
166 offset = LI->end % InstrSlots::NUM;
167 if (offset == InstrSlots::LOAD) {
168 // VReg dies at end of block.
169 std::vector<IdxMBBPair>::const_iterator I =
170 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
173 LI->end = getMBBEndIdx(I->second) + 1;
175 unsigned idx = index;
176 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
178 if (index != OldI2MI.size())
179 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
181 LI->end = InstrSlots::NUM * i2miMap_.size();
185 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
186 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
189 // Remap the VNInfo def index, which works the same as the
190 // start indices above. VN's with special sentinel defs
191 // don't need to be remapped.
192 if (vni->def != ~0U && vni->def != ~1U) {
193 unsigned index = vni->def / InstrSlots::NUM;
194 unsigned offset = vni->def % InstrSlots::NUM;
195 if (offset == InstrSlots::LOAD) {
196 std::vector<IdxMBBPair>::const_iterator I =
197 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
198 // Take the pair containing the index
199 std::vector<IdxMBBPair>::const_iterator J =
200 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
202 vni->def = getMBBStartIdx(J->second);
204 vni->def = mi2iMap_[OldI2MI[index]] + offset;
208 // Remap the VNInfo kill indices, which works the same as
209 // the end indices above.
210 for (size_t i = 0; i < vni->kills.size(); ++i) {
211 // PHI kills don't need to be remapped.
212 if (!vni->kills[i]) continue;
214 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
215 unsigned offset = vni->kills[i] % InstrSlots::NUM;
216 if (offset == InstrSlots::STORE) {
217 std::vector<IdxMBBPair>::const_iterator I =
218 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
221 vni->kills[i] = getMBBEndIdx(I->second);
223 unsigned idx = index;
224 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
226 if (index != OldI2MI.size())
227 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
228 (idx == index ? offset : 0);
230 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
237 /// runOnMachineFunction - Compute live intervals for the whole function.
239 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
241 mri_ = &mf_->getRegInfo();
242 tm_ = &fn.getTarget();
243 tri_ = tm_->getRegisterInfo();
244 tii_ = tm_->getInstrInfo();
245 aa_ = &getAnalysis<AliasAnalysis>();
246 lv_ = &getAnalysis<LiveVariables>();
247 allocatableRegs_ = tri_->getAllocatableSet(fn);
252 numIntervals += getNumIntervals();
254 DOUT << "********** INTERVALS **********\n";
255 for (iterator I = begin(), E = end(); I != E; ++I) {
256 I->second->print(DOUT, tri_);
260 numIntervalsAfter += getNumIntervals();
265 /// print - Implement the dump method.
266 void LiveIntervals::print(std::ostream &O, const Module* ) const {
267 O << "********** INTERVALS **********\n";
268 for (const_iterator I = begin(), E = end(); I != E; ++I) {
269 I->second->print(O, tri_);
273 O << "********** MACHINEINSTRS **********\n";
274 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
275 mbbi != mbbe; ++mbbi) {
276 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
277 for (MachineBasicBlock::iterator mii = mbbi->begin(),
278 mie = mbbi->end(); mii != mie; ++mii) {
279 O << getInstructionIndex(mii) << '\t' << *mii;
284 /// conflictsWithPhysRegDef - Returns true if the specified register
285 /// is defined during the duration of the specified interval.
286 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
287 VirtRegMap &vrm, unsigned reg) {
288 for (LiveInterval::Ranges::const_iterator
289 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
290 for (unsigned index = getBaseIndex(I->start),
291 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
292 index += InstrSlots::NUM) {
293 // skip deleted instructions
294 while (index != end && !getInstructionFromIndex(index))
295 index += InstrSlots::NUM;
296 if (index == end) break;
298 MachineInstr *MI = getInstructionFromIndex(index);
299 unsigned SrcReg, DstReg;
300 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
301 if (SrcReg == li.reg || DstReg == li.reg)
303 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
304 MachineOperand& mop = MI->getOperand(i);
305 if (!mop.isRegister())
307 unsigned PhysReg = mop.getReg();
308 if (PhysReg == 0 || PhysReg == li.reg)
310 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
311 if (!vrm.hasPhys(PhysReg))
313 PhysReg = vrm.getPhys(PhysReg);
315 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
324 void LiveIntervals::printRegName(unsigned reg) const {
325 if (TargetRegisterInfo::isPhysicalRegister(reg))
326 cerr << tri_->getName(reg);
328 cerr << "%reg" << reg;
331 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
332 MachineBasicBlock::iterator mi,
333 unsigned MIIdx, MachineOperand& MO,
335 LiveInterval &interval) {
336 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
337 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
339 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
340 DOUT << "is an implicit_def\n";
344 // Virtual registers may be defined multiple times (due to phi
345 // elimination and 2-addr elimination). Much of what we do only has to be
346 // done once for the vreg. We use an empty interval to detect the first
347 // time we see a vreg.
348 if (interval.empty()) {
349 // Get the Idx of the defining instruction.
350 unsigned defIndex = getDefIndex(MIIdx);
352 MachineInstr *CopyMI = NULL;
353 unsigned SrcReg, DstReg;
354 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
355 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
356 tii_->isMoveInstr(*mi, SrcReg, DstReg))
358 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
360 assert(ValNo->id == 0 && "First value in interval is not 0?");
362 // Loop over all of the blocks that the vreg is defined in. There are
363 // two cases we have to handle here. The most common case is a vreg
364 // whose lifetime is contained within a basic block. In this case there
365 // will be a single kill, in MBB, which comes after the definition.
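  // Hypothetical example (register and indices invented for illustration):
  // if the vreg is defined at instruction index 12 and its only kill is the
  // instruction at index 20 in the same block, the code below builds the
  // single range [getDefIndex(12), getUseIndex(20)+1) with value number 0.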
366 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
367 // FIXME: what about dead vars?
369 if (vi.Kills[0] != mi)
370 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
372 killIdx = defIndex+1;
374 // If the kill happens after the definition, we have an intra-block
376 if (killIdx > defIndex) {
377 assert(vi.AliveBlocks.none() &&
378 "Shouldn't be alive across any blocks!");
379 LiveRange LR(defIndex, killIdx, ValNo);
380 interval.addRange(LR);
381 DOUT << " +" << LR << "\n";
382 interval.addKill(ValNo, killIdx);
387 // The other case we handle is when a virtual register lives to the end
388 // of the defining block, potentially live across some blocks, then is
389 // live into some number of blocks, but gets killed. Start by adding a
390 // range that goes from this definition to the end of the defining block.
391 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
392 DOUT << " +" << NewLR;
393 interval.addRange(NewLR);
395 // Iterate over all of the blocks that the variable is completely
396 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
398 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
399 if (vi.AliveBlocks[i]) {
400 LiveRange LR(getMBBStartIdx(i),
401 getMBBEndIdx(i)+1, // MBB ends at -1.
403 interval.addRange(LR);
408 // Finally, this virtual register is live from the start of any killing
409 // block to the 'use' slot of the killing instruction.
410 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
411 MachineInstr *Kill = vi.Kills[i];
412 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
413 LiveRange LR(getMBBStartIdx(Kill->getParent()),
415 interval.addRange(LR);
416 interval.addKill(ValNo, killIdx);
421 // If this is the second time we see a virtual register definition, it
422 // must be due to phi elimination or two addr elimination. If this is
423 // the result of two address elimination, then the vreg is one of the
424 // def-and-use register operands.
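  // Sketch of the two-address case handled below (opcodes and vreg numbers
  // are invented for illustration):
  //   %reg1024 = MOV %reg1025            ; defines value #0
  //   %reg1024 = ADD %reg1024, %reg1026  ; two-address redef of %reg1024
  // The single value computed on the first visit is split in two: a new
  // value number keeps the original copy's def slot, while value #0 is
  // re-pointed at the ADD's def slot further down.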
425 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
426 // If this is a two-address definition, then we have already processed
427 // the live range. The only problem is that we didn't realize there
428 // are actually two values in the live interval. Because of this we
429 // need to take the LiveRegion that defines this register and split it into two values.
431 assert(interval.containsOneValue());
432 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
433 unsigned RedefIndex = getDefIndex(MIIdx);
435 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
436 VNInfo *OldValNo = OldLR->valno;
438 // Delete the initial value, which should be short and continuous,
439 // because the 2-addr copy must be in the same MBB as the redef.
440 interval.removeRange(DefIndex, RedefIndex);
442 // Two-address vregs should only be redefined once. This means
443 // that at this point, there should be exactly one value number in it.
444 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
446 // The new value number (#1) is defined by the instruction we claimed defined value #0.
448 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
451 // Value#0 is now defined by the 2-addr instruction.
452 OldValNo->def = RedefIndex;
455 // Add the new live interval which replaces the range for the input copy.
456 LiveRange LR(DefIndex, RedefIndex, ValNo);
457 DOUT << " replace range with " << LR;
458 interval.addRange(LR);
459 interval.addKill(ValNo, RedefIndex);
461 // If this redefinition is dead, we need to add a dummy unit live
462 // range covering the def slot.
464 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
467 interval.print(DOUT, tri_);
470 // Otherwise, this must be because of phi elimination. If this is the
471 // first redefinition of the vreg that we have seen, go back and change
472 // the live range in the PHI block to be a different value number.
473 if (interval.containsOneValue()) {
474 assert(vi.Kills.size() == 1 &&
475 "PHI elimination vreg should have one kill, the PHI itself!");
477 // Remove the old range that we now know has an incorrect number.
478 VNInfo *VNI = interval.getValNumInfo(0);
479 MachineInstr *Killer = vi.Kills[0];
480 unsigned Start = getMBBStartIdx(Killer->getParent());
481 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
482 DOUT << " Removing [" << Start << "," << End << "] from: ";
483 interval.print(DOUT, tri_); DOUT << "\n";
484 interval.removeRange(Start, End);
485 VNI->hasPHIKill = true;
486 DOUT << " RESULT: "; interval.print(DOUT, tri_);
488 // Replace the interval with one of a NEW value number. Note that this
489 // value number isn't actually defined by an instruction, weird huh? :)
490 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
491 DOUT << " replace range with " << LR;
492 interval.addRange(LR);
493 interval.addKill(LR.valno, End);
494 DOUT << " RESULT: "; interval.print(DOUT, tri_);
497 // In the case of PHI elimination, each variable definition is only
498 // live until the end of the block. We've already taken care of the
499 // rest of the live range.
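    // Put differently (an illustrative restatement of the code below): a copy
    // inserted by PHI elimination at index d in block BB gets the range
    // [getDefIndex(d), getMBBEndIdx(BB)+1), and its value number is flagged
    // hasPHIKill because the value is actually consumed by a PHI join in a
    // successor block.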
500 unsigned defIndex = getDefIndex(MIIdx);
503 MachineInstr *CopyMI = NULL;
504 unsigned SrcReg, DstReg;
505 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
506 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
507 tii_->isMoveInstr(*mi, SrcReg, DstReg))
509 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
511 unsigned killIndex = getMBBEndIdx(mbb) + 1;
512 LiveRange LR(defIndex, killIndex, ValNo);
513 interval.addRange(LR);
514 interval.addKill(ValNo, killIndex);
515 ValNo->hasPHIKill = true;
523 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
524 MachineBasicBlock::iterator mi,
527 LiveInterval &interval,
528 MachineInstr *CopyMI) {
529 // A physical register cannot be live across basic blocks, so its
530 // lifetime must end somewhere in its defining basic block.
531 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
533 unsigned baseIndex = MIIdx;
534 unsigned start = getDefIndex(baseIndex);
535 unsigned end = start;
537 // If it is not used after definition, it is considered dead at
538 // the instruction defining it. Hence its interval is:
539 // [defSlot(def), defSlot(def)+1)
542 end = getDefIndex(start) + 1;
546 // If it is not dead on definition, it must be killed by a
547 // subsequent instruction. Hence its interval is:
548 // [defSlot(def), useSlot(kill)+1)
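  // For example (indices invented for illustration): a physreg defined at
  // base index 16 and killed by the instruction at base index 24 receives
  // the range [defSlot(16), useSlot(24)+1); if it is redefined or never
  // read, it instead gets the one-slot range [defSlot(def), defSlot(def)+1)
  // described above.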
549 baseIndex += InstrSlots::NUM;
550 while (++mi != MBB->end()) {
551 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
552 getInstructionFromIndex(baseIndex) == 0)
553 baseIndex += InstrSlots::NUM;
554 if (mi->killsRegister(interval.reg, tri_)) {
556 end = getUseIndex(baseIndex) + 1;
558 } else if (mi->modifiesRegister(interval.reg, tri_)) {
559 // Another instruction redefines the register before it is ever read.
560 // Then the register is essentially dead at the instruction that defines
561 // it. Hence its interval is:
562 // [defSlot(def), defSlot(def)+1)
564 end = getDefIndex(start) + 1;
568 baseIndex += InstrSlots::NUM;
571 // The only case we should have a dead physreg here without a killing or
572 // instruction where we know it's dead is if it is live-in to the function and never used.
574 assert(!CopyMI && "physreg was not killed in defining block!");
575 end = getDefIndex(start) + 1; // It's dead.
578 assert(start < end && "did not find end of interval?");
580 // Already exists? Extend old live interval.
581 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
582 VNInfo *ValNo = (OldLR != interval.end())
583 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
584 LiveRange LR(start, end, ValNo);
585 interval.addRange(LR);
586 interval.addKill(LR.valno, end);
587 DOUT << " +" << LR << '\n';
590 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
591 MachineBasicBlock::iterator MI,
595 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
596 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
597 getOrCreateInterval(MO.getReg()));
598 else if (allocatableRegs_[MO.getReg()]) {
599 MachineInstr *CopyMI = NULL;
600 unsigned SrcReg, DstReg;
601 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
602 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
603 tii_->isMoveInstr(*MI, SrcReg, DstReg))
605 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
606 getOrCreateInterval(MO.getReg()), CopyMI);
607 // Def of a register also defines its sub-registers.
608 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
609 // If MI also modifies the sub-register explicitly, avoid processing it
610 // more than once. Do not pass in TRI here so it checks for exact match.
611 if (!MI->modifiesRegister(*AS))
612 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
613 getOrCreateInterval(*AS), 0);
617 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
619 LiveInterval &interval, bool isAlias) {
620 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
622 // Look for kills; if it reaches a def before it's killed, then it shouldn't
623 // be considered a live-in.
624 MachineBasicBlock::iterator mi = MBB->begin();
625 unsigned baseIndex = MIIdx;
626 unsigned start = baseIndex;
627 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
628 getInstructionFromIndex(baseIndex) == 0)
629 baseIndex += InstrSlots::NUM;
630 unsigned end = baseIndex;
632 while (mi != MBB->end()) {
633 if (mi->killsRegister(interval.reg, tri_)) {
635 end = getUseIndex(baseIndex) + 1;
637 } else if (mi->modifiesRegister(interval.reg, tri_)) {
638 // Another instruction redefines the register before it is ever read.
639 // Then the register is essentially dead at the instruction that defines
640 // it. Hence its interval is:
641 // [defSlot(def), defSlot(def)+1)
643 end = getDefIndex(start) + 1;
647 baseIndex += InstrSlots::NUM;
648 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
649 getInstructionFromIndex(baseIndex) == 0)
650 baseIndex += InstrSlots::NUM;
655 // Live-in register might not be used at all.
659 end = getDefIndex(MIIdx) + 1;
661 DOUT << " live through";
666 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
667 interval.addRange(LR);
668 interval.addKill(LR.valno, end);
669 DOUT << " +" << LR << '\n';
672 /// computeIntervals - computes the live intervals for virtual
673 /// registers. For some ordering of the machine instructions [1,N], a
674 /// live interval is an interval [i, j) where 1 <= i <= j < N for
675 /// which a variable is live.
676 void LiveIntervals::computeIntervals() {
678 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
679 << "********** Function: "
680 << ((Value*)mf_->getFunction())->getName() << '\n';
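  // The traversal below is a straightforward sweep: for every block, first
  // give live-in registers a range starting at the block entry via
  // handleLiveInRegister, then walk the instructions in order and hand each
  // register def to handleRegisterDef, which dispatches to the virtual- or
  // physical-register handlers above.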
682 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
684 MachineBasicBlock *MBB = MBBI;
685 // Track the index of the current machine instr.
686 unsigned MIIndex = getMBBStartIdx(MBB);
687 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
689 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
691 // Create intervals for live-ins to this BB first.
692 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
693 LE = MBB->livein_end(); LI != LE; ++LI) {
694 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
695 // Multiple live-ins can alias the same register.
696 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
697 if (!hasInterval(*AS))
698 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
702 // Skip over empty initial indices.
703 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
704 getInstructionFromIndex(MIIndex) == 0)
705 MIIndex += InstrSlots::NUM;
707 for (; MI != miEnd; ++MI) {
708 DOUT << MIIndex << "\t" << *MI;
711 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
712 MachineOperand &MO = MI->getOperand(i);
713 // handle register defs - build intervals
714 if (MO.isRegister() && MO.getReg() && MO.isDef()) {
715 handleRegisterDef(MBB, MI, MIIndex, MO, i);
716 if (MO.isEarlyClobber()) {
717 LiveInterval &interval = getOrCreateInterval(MO.getReg());
718 interval.isEarlyClobber = true;
721 if (MO.isRegister() && !MO.isDef() &&
722 MO.getReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()) &&
723 MO.overlapsEarlyClobber()) {
724 LiveInterval &interval = getOrCreateInterval(MO.getReg());
725 interval.overlapsEarlyClobber = true;
729 MIIndex += InstrSlots::NUM;
731 // Skip over empty indices.
732 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
733 getInstructionFromIndex(MIIndex) == 0)
734 MIIndex += InstrSlots::NUM;
739 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
740 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
741 std::vector<IdxMBBPair>::const_iterator I =
742 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
745 while (I != Idx2MBBMap.end()) {
746 if (LR.end <= I->first)
748 MBBs.push_back(I->second);
755 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
756 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
758 return new LiveInterval(reg, Weight);
761 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
762 /// copy field and returns the source register that defines it.
763 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
767 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
768 return VNI->copy->getOperand(1).getReg();
769 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
770 return VNI->copy->getOperand(2).getReg();
771 unsigned SrcReg, DstReg;
772 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
774 assert(0 && "Unrecognized copy instruction!");
778 //===----------------------------------------------------------------------===//
779 // Register allocator hooks.
782 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
783 /// allow one) virtual register operand, then its uses are implicitly using
784 /// the register. Returns the virtual register.
785 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
786 MachineInstr *MI) const {
788 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
789 MachineOperand &MO = MI->getOperand(i);
790 if (!MO.isRegister() || !MO.isUse())
792 unsigned Reg = MO.getReg();
793 if (Reg == 0 || Reg == li.reg)
795 // FIXME: For now, only remat MI with at most one register operand.
797 "Can't rematerialize instruction with multiple register operand!");
806 /// isValNoAvailableAt - Return true if the val# of the specified interval
807 /// which reaches the given instruction also reaches the specified use index.
808 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
809 unsigned UseIdx) const {
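  // Illustrative reading (indices invented): this returns true when the val#
  // live at MI's own index (say 40) is also the val# covering UseIdx (say 52),
  // i.e. both points see the same incoming value of this interval.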
810 unsigned Index = getInstructionIndex(MI);
811 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
812 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
813 return UI != li.end() && UI->valno == ValNo;
816 /// isReMaterializable - Returns true if the definition MI of the specified
817 /// val# of the specified interval is re-materializable.
818 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
819 const VNInfo *ValNo, MachineInstr *MI,
824 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
828 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
829 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
830 // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
831 // this, but remember this is not safe to fold into a two-address instruction.
833 // This is a load from a fixed stack slot. It can be rematerialized.
836 // If the target-specific rules don't identify an instruction as
837 // being trivially rematerializable, use some target-independent checks.
839 if (!MI->getDesc().isRematerializable() ||
840 !tii_->isTriviallyReMaterializable(MI)) {
841 if (!EnableAggressiveRemat)
844 // If the instruction accesses memory but the memoperands have been lost,
845 // we can't analyze it.
846 const TargetInstrDesc &TID = MI->getDesc();
847 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
850 // Avoid instructions obviously unsafe for remat.
851 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
854 // If the instruction accesses memory and the memory could be non-constant,
855 // assume the instruction is not rematerializable.
856 for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
857 E = MI->memoperands_end(); I != E; ++I) {
858 const MachineMemOperand &MMO = *I;
859 if (MMO.isVolatile() || MMO.isStore())
861 const Value *V = MMO.getValue();
864 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
865 if (!PSV->isConstant(mf_->getFrameInfo()))
867 } else if (!aa_->pointsToConstantMemory(V))
871 // If any of the registers accessed are non-constant, conservatively assume
872 // the instruction is not rematerializable.
874 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
875 const MachineOperand &MO = MI->getOperand(i);
876 if (MO.isRegister()) {
877 unsigned Reg = MO.getReg();
880 if (TargetRegisterInfo::isPhysicalRegister(Reg))
883 // Only allow one def, and that in the first operand.
884 if (MO.isDef() != (i == 0))
887 // Only allow constant-valued registers.
888 bool IsLiveIn = mri_->isLiveIn(Reg);
889 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
892 // For the def, it should be the only def.
893 if (MO.isDef() && (next(I) != E || IsLiveIn))
897 // Only allow one other register use, as that's all the
898 // remat mechanisms support currently.
902 else if (Reg != ImpUse)
905 // For uses, there should be only one associated def.
906 if (I != E && (next(I) != E || IsLiveIn))
913 unsigned ImpUse = getReMatImplicitUse(li, MI);
915 const LiveInterval &ImpLi = getInterval(ImpUse);
916 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
917 re = mri_->use_end(); ri != re; ++ri) {
918 MachineInstr *UseMI = &*ri;
919 unsigned UseIdx = getInstructionIndex(UseMI);
920 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
922 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
929 /// isReMaterializable - Returns true if every definition of MI of every
930 /// val# of the specified interval is re-materializable.
931 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
933 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
935 const VNInfo *VNI = *i;
936 unsigned DefIdx = VNI->def;
938 continue; // Dead val#.
939 // Is the def for the val# rematerializable?
942 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
943 bool DefIsLoad = false;
945 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
952 /// FilterFoldedOps - Filter out two-address use operands. Return
953 /// true if it finds any issue with the operands that ought to prevent folding.
955 static bool FilterFoldedOps(MachineInstr *MI,
956 SmallVector<unsigned, 2> &Ops,
958 SmallVector<unsigned, 2> &FoldOps) {
959 const TargetInstrDesc &TID = MI->getDesc();
962 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
963 unsigned OpIdx = Ops[i];
964 MachineOperand &MO = MI->getOperand(OpIdx);
965 // FIXME: fold subreg use.
969 MRInfo |= (unsigned)VirtRegMap::isMod;
971 // Filter out two-address use operand(s).
972 if (!MO.isImplicit() &&
973 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
974 MRInfo = VirtRegMap::isModRef;
977 MRInfo |= (unsigned)VirtRegMap::isRef;
979 FoldOps.push_back(OpIdx);
985 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
986 /// slot / to reg or any rematerialized load into the ith operand of the specified
987 /// MI. If it is successful, MI is updated with the newly created MI and true is returned.
989 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
990 VirtRegMap &vrm, MachineInstr *DefMI,
992 SmallVector<unsigned, 2> &Ops,
993 bool isSS, int Slot, unsigned Reg) {
994 // If it is an implicit def instruction, just delete it.
995 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
996 RemoveMachineInstrFromMaps(MI);
997 vrm.RemoveMachineInstrFromMaps(MI);
998 MI->eraseFromParent();
1003 // Filter the list of operand indexes that are to be folded. Abort if
1004 // any operand will prevent folding.
1005 unsigned MRInfo = 0;
1006 SmallVector<unsigned, 2> FoldOps;
1007 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1010 // The only time it's safe to fold into a two address instruction is when
1011 // it's folding reload and spill from / into a spill stack slot.
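  // Illustrative example (opcode invented): for a two-address instruction
  //   %reg1024 = ADD %reg1024, %reg1026
  // the def and use are tied, so the reload of the use and the store of the
  // result must go through the same spill slot.  Folding a rematerialized
  // value (DefMI below) into such an operand is rejected, presumably because
  // there is no stack slot to receive the redefined value.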
1012 if (DefMI && (MRInfo & VirtRegMap::isMod))
1015 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1016 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1018 // Remember this instruction uses the spill slot.
1019 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1021 // Attempt to fold the memory reference into the instruction. If
1022 // we can do this, we don't need to insert spill code.
1023 MachineBasicBlock &MBB = *MI->getParent();
1024 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1025 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1026 vrm.transferSpillPts(MI, fmi);
1027 vrm.transferRestorePts(MI, fmi);
1028 vrm.transferEmergencySpills(MI, fmi);
1030 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1031 mi2iMap_[fmi] = InstrIdx;
1032 MI = MBB.insert(MBB.erase(MI), fmi);
1039 /// canFoldMemoryOperand - Returns true if the specified load / store
1040 /// folding is possible.
1041 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1042 SmallVector<unsigned, 2> &Ops,
1044 // Filter the list of operand indexes that are to be folded. Abort if
1045 // any operand will prevent folding.
1046 unsigned MRInfo = 0;
1047 SmallVector<unsigned, 2> FoldOps;
1048 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1051 // It's only legal to remat for a use, not a def.
1052 if (ReMat && (MRInfo & VirtRegMap::isMod))
1055 return tii_->canFoldMemoryOperand(MI, FoldOps);
1058 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1059 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1060 for (LiveInterval::Ranges::const_iterator
1061 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1062 std::vector<IdxMBBPair>::const_iterator II =
1063 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1064 if (II == Idx2MBBMap.end())
1066 if (I->end > II->first) // crossing a MBB.
1068 MBBs.insert(II->second);
1069 if (MBBs.size() > 1)
1075 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1076 /// interval on to-be re-materialized operands of MI) with new register.
1077 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1078 MachineInstr *MI, unsigned NewVReg,
1080 // There is an implicit use. That means one of the other operands is
1081 // being remat'ed and the remat'ed instruction has li.reg as a
1082 // use operand. Make sure we rewrite that as well.
1083 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1084 MachineOperand &MO = MI->getOperand(i);
1085 if (!MO.isRegister())
1087 unsigned Reg = MO.getReg();
1088 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1090 if (!vrm.isReMaterialized(Reg))
1092 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1093 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1095 UseMO->setReg(NewVReg);
1099 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1100 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1101 bool LiveIntervals::
1102 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1103 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1104 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1105 unsigned Slot, int LdSlot,
1106 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1108 const TargetRegisterClass* rc,
1109 SmallVector<int, 4> &ReMatIds,
1110 const MachineLoopInfo *loopInfo,
1111 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1112 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1113 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1114 MachineBasicBlock *MBB = MI->getParent();
1115 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1116 bool CanFold = false;
1118 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1119 MachineOperand& mop = MI->getOperand(i);
1120 if (!mop.isRegister())
1122 unsigned Reg = mop.getReg();
1123 unsigned RegI = Reg;
1124 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1129 bool TryFold = !DefIsReMat;
1130 bool FoldSS = true; // Default behavior unless it's a remat.
1131 int FoldSlot = Slot;
1133 // If this is the rematerializable definition MI itself and
1134 // all of its uses are rematerialized, simply delete it.
1135 if (MI == ReMatOrigDefMI && CanDelete) {
1136 DOUT << "\t\t\t\tErasing re-materializable def: ";
1138 RemoveMachineInstrFromMaps(MI);
1139 vrm.RemoveMachineInstrFromMaps(MI);
1140 MI->eraseFromParent();
1144 // If def for this use can't be rematerialized, then try folding.
1145 // If def is rematerializable and it's a load, also try folding.
1146 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1148 // Try to fold loads (from a stack slot, constant pool, etc.) into uses.
1154 // Scan all of the operands of this instruction rewriting operands
1155 // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1158 // 1. If the instr reads the same spilled vreg multiple times, we
1159 // want to reuse the NewVReg.
1160 // 2. If the instr is a two-addr instruction, we are required to
1161 // keep the src/dst regs pinned.
1163 // Keep track of whether we replace a use and/or def so that we can
1164 // create the spill interval with the appropriate range.
1166 HasUse = mop.isUse();
1167 HasDef = mop.isDef();
1168 SmallVector<unsigned, 2> Ops;
1170 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1171 const MachineOperand &MOj = MI->getOperand(j);
1172 if (!MOj.isRegister())
1174 unsigned RegJ = MOj.getReg();
1175 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1179 HasUse |= MOj.isUse();
1180 HasDef |= MOj.isDef();
1184 if (HasUse && !li.liveAt(getUseIndex(index)))
1185 // Must be defined by an implicit def. It should not be spilled. Note,
1186 // this is for correctness reasons. e.g.
1187 // 8 %reg1024<def> = IMPLICIT_DEF
1188 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1189 // The live range [12, 14) is not part of the r1024 live interval since
1190 // it's defined by an implicit def. It will not conflict with the live
1191 // interval of r1025. Now suppose both registers are spilled; you can
1192 // easily see a situation where both registers are reloaded before
1193 // the INSERT_SUBREG and both target registers would overlap.
1196 // Update stack slot spill weight if we are splitting.
1197 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
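      // (getSpillWeight is defined in the header; as a rough assumption it
      // scales like (def + use) * 10^loopDepth, so accesses inside deep
      // loops dominate the accumulated slot weight.)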
1204 // Do not fold load / store here if we are splitting. We'll find an
1205 // optimal point to insert a load / store later.
1207 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1208 Ops, FoldSS, FoldSlot, Reg)) {
1209 // Folding the load/store can completely change the instruction in
1210 // unpredictable ways, rescan it from the beginning.
1214 if (isRemoved(MI)) {
1218 goto RestartInstruction;
1221 // We'll try to fold it later if it's profitable.
1222 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1226 // Create a new virtual register for the spill interval.
1227 bool CreatedNewVReg = false;
1229 NewVReg = mri_->createVirtualRegister(rc);
1231 CreatedNewVReg = true;
1233 mop.setReg(NewVReg);
1234 if (mop.isImplicit())
1235 rewriteImplicitOps(li, MI, NewVReg, vrm);
1237 // Reuse NewVReg for other reads.
1238 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1239 MachineOperand &mopj = MI->getOperand(Ops[j]);
1240 mopj.setReg(NewVReg);
1241 if (mopj.isImplicit())
1242 rewriteImplicitOps(li, MI, NewVReg, vrm);
1245 if (CreatedNewVReg) {
1247 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1248 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1249 // Each valnum may have its own remat id.
1250 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1252 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1254 if (!CanDelete || (HasUse && HasDef)) {
1255 // If this is a two-addr instruction then its use operands are
1256 // rematerializable but its def is not. It should be assigned a stack slot.
1258 vrm.assignVirt2StackSlot(NewVReg, Slot);
1261 vrm.assignVirt2StackSlot(NewVReg, Slot);
1263 } else if (HasUse && HasDef &&
1264 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1265 // If this interval hasn't been assigned a stack slot (because earlier
1266 // def is a deleted remat def), do it now.
1267 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1268 vrm.assignVirt2StackSlot(NewVReg, Slot);
1271 // Re-matting an instruction with virtual register use. Add the
1272 // register as an implicit use on the use MI.
1273 if (DefIsReMat && ImpUse)
1274 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1276 // create a new register interval for this spill / remat.
1277 LiveInterval &nI = getOrCreateInterval(NewVReg);
1278 if (CreatedNewVReg) {
1279 NewLIs.push_back(&nI);
1280 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1282 vrm.setIsSplitFromReg(NewVReg, li.reg);
1286 if (CreatedNewVReg) {
1287 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1288 nI.getNextValue(~0U, 0, VNInfoAllocator));
1292 // Extend the split live interval to this def / use.
1293 unsigned End = getUseIndex(index)+1;
1294 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1295 nI.getValNumInfo(nI.getNumValNums()-1));
1301 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1302 nI.getNextValue(~0U, 0, VNInfoAllocator));
1307 DOUT << "\t\t\t\tAdded new interval: ";
1308 nI.print(DOUT, tri_);
1313 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1315 MachineBasicBlock *MBB, unsigned Idx) const {
1316 unsigned End = getMBBEndIdx(MBB);
1317 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1318 unsigned KillIdx = VNI->kills[j];
1319 if (KillIdx > Idx && KillIdx < End)
1325 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1326 /// during spilling.
1328 struct RewriteInfo {
1333 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1334 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1337 struct RewriteInfoCompare {
1338 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1339 return LHS.Index < RHS.Index;
1344 void LiveIntervals::
1345 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1346 LiveInterval::Ranges::const_iterator &I,
1347 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1348 unsigned Slot, int LdSlot,
1349 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1351 const TargetRegisterClass* rc,
1352 SmallVector<int, 4> &ReMatIds,
1353 const MachineLoopInfo *loopInfo,
1354 BitVector &SpillMBBs,
1355 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1356 BitVector &RestoreMBBs,
1357 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1358 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1359 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1360 bool AllCanFold = true;
1361 unsigned NewVReg = 0;
1362 unsigned start = getBaseIndex(I->start);
1363 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1365 // First collect all the defs / uses in this live range that will be rewritten.
1366 // Make sure they are sorted according to instruction index.
1367 std::vector<RewriteInfo> RewriteMIs;
1368 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1369 re = mri_->reg_end(); ri != re; ) {
1370 MachineInstr *MI = &*ri;
1371 MachineOperand &O = ri.getOperand();
1373 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1374 unsigned index = getInstructionIndex(MI);
1375 if (index < start || index >= end)
1377 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1378 // Must be defined by an implicit def. It should not be spilled. Note,
1379 // this is for correctness reasons. e.g.
1380 // 8 %reg1024<def> = IMPLICIT_DEF
1381 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1382 // The live range [12, 14) is not part of the r1024 live interval since
1383 // it's defined by an implicit def. It will not conflict with the live
1384 // interval of r1025. Now suppose both registers are spilled; you can
1385 // easily see a situation where both registers are reloaded before
1386 // the INSERT_SUBREG and both target registers would overlap.
1388 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1390 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1392 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1393 // Now rewrite the defs and uses.
1394 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1395 RewriteInfo &rwi = RewriteMIs[i];
1397 unsigned index = rwi.Index;
1398 bool MIHasUse = rwi.HasUse;
1399 bool MIHasDef = rwi.HasDef;
1400 MachineInstr *MI = rwi.MI;
1401 // If MI defs and/or uses the same register multiple times, then there
1402 // are multiple entries.
1403 unsigned NumUses = MIHasUse;
1404 while (i != e && RewriteMIs[i].MI == MI) {
1405 assert(RewriteMIs[i].Index == index);
1406 bool isUse = RewriteMIs[i].HasUse;
1407 if (isUse) ++NumUses;
1409 MIHasDef |= RewriteMIs[i].HasDef;
1412 MachineBasicBlock *MBB = MI->getParent();
1414 if (ImpUse && MI != ReMatDefMI) {
1415 // Re-matting an instruction with virtual register use. Update the
1416 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1418 LiveInterval &ImpLi = getInterval(ImpUse);
1419 ImpLi.weight = HUGE_VALF;
1422 unsigned MBBId = MBB->getNumber();
1423 unsigned ThisVReg = 0;
1425 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1426 if (NVI != MBBVRegsMap.end()) {
1427 ThisVReg = NVI->second;
1434 // It's better to start a new interval to avoid artificially
1435 // extending the new interval.
1436 if (MIHasDef && !MIHasUse) {
1437 MBBVRegsMap.erase(MBB->getNumber());
1443 bool IsNew = ThisVReg == 0;
1445 // This ends the previous live interval. If all of its def / use
1446 // can be folded, give it a low spill weight.
1447 if (NewVReg && TrySplit && AllCanFold) {
1448 LiveInterval &nI = getOrCreateInterval(NewVReg);
1455 bool HasDef = false;
1456 bool HasUse = false;
1457 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1458 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1459 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1460 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1461 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1462 if (!HasDef && !HasUse)
1465 AllCanFold &= CanFold;
1467 // Update weight of spill interval.
1468 LiveInterval &nI = getOrCreateInterval(NewVReg);
1470 // The spill weight is now infinity as it cannot be spilled again.
1471 nI.weight = HUGE_VALF;
1475 // Keep track of the last def and first use in each MBB.
1477 if (MI != ReMatOrigDefMI || !CanDelete) {
1478 bool HasKill = false;
1480 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1482 // If this is two-address code, then this index starts a new VNInfo.
1483 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1485 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1487 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1488 SpillIdxes.find(MBBId);
1490 if (SII == SpillIdxes.end()) {
1491 std::vector<SRInfo> S;
1492 S.push_back(SRInfo(index, NewVReg, true));
1493 SpillIdxes.insert(std::make_pair(MBBId, S));
1494 } else if (SII->second.back().vreg != NewVReg) {
1495 SII->second.push_back(SRInfo(index, NewVReg, true));
1496 } else if ((int)index > SII->second.back().index) {
1497 // If there is an earlier def and this is a two-address
1498 // instruction, then it's not possible to fold the store (which
1499 // would also fold the load).
1500 SRInfo &Info = SII->second.back();
1502 Info.canFold = !HasUse;
1504 SpillMBBs.set(MBBId);
1505 } else if (SII != SpillIdxes.end() &&
1506 SII->second.back().vreg == NewVReg &&
1507 (int)index > SII->second.back().index) {
1508 // There is an earlier def that's not killed (must be two-address).
1509 // The spill is no longer needed.
1510 SII->second.pop_back();
1511 if (SII->second.empty()) {
1512 SpillIdxes.erase(MBBId);
1513 SpillMBBs.reset(MBBId);
1520 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1521 SpillIdxes.find(MBBId);
1522 if (SII != SpillIdxes.end() &&
1523 SII->second.back().vreg == NewVReg &&
1524 (int)index > SII->second.back().index)
1525 // There are use(s) following the last def, so it's not safe to fold the spill.
1526 SII->second.back().canFold = false;
1527 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1528 RestoreIdxes.find(MBBId);
1529 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1530 // If we are splitting live intervals, only fold if it's the first
1531 // use and there isn't another use later in the MBB.
1532 RII->second.back().canFold = false;
1534 // Only need a reload if there isn't an earlier def / use.
1535 if (RII == RestoreIdxes.end()) {
1536 std::vector<SRInfo> Infos;
1537 Infos.push_back(SRInfo(index, NewVReg, true));
1538 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1540 RII->second.push_back(SRInfo(index, NewVReg, true));
1542 RestoreMBBs.set(MBBId);
1546 // Update spill weight.
1547 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1548 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1551 if (NewVReg && TrySplit && AllCanFold) {
1552 // If all of its def / use can be folded, give it a low spill weight.
1553 LiveInterval &nI = getOrCreateInterval(NewVReg);
1558 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1559 BitVector &RestoreMBBs,
1560 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1561 if (!RestoreMBBs[Id])
1563 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1564 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1565 if (Restores[i].index == index &&
1566 Restores[i].vreg == vr &&
1567 Restores[i].canFold)
1572 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1573 BitVector &RestoreMBBs,
1574 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1575 if (!RestoreMBBs[Id])
1577 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1578 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1579 if (Restores[i].index == index && Restores[i].vreg)
1580 Restores[i].index = -1;
1583 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1584 /// spilled and create empty intervals for their uses.
1586 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1587 const TargetRegisterClass* rc,
1588 std::vector<LiveInterval*> &NewLIs) {
1589 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1590 re = mri_->reg_end(); ri != re; ) {
1591 MachineOperand &O = ri.getOperand();
1592 MachineInstr *MI = &*ri;
1595 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1596 "Register def was not rewritten?");
1597 RemoveMachineInstrFromMaps(MI);
1598 vrm.RemoveMachineInstrFromMaps(MI);
1599 MI->eraseFromParent();
1601 // This must be a use of an implicit_def so it's not part of the live
1602 // interval. Create a new empty live interval for it.
1603 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1604 unsigned NewVReg = mri_->createVirtualRegister(rc);
1606 vrm.setIsImplicitlyDefined(NewVReg);
1607 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1608 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1609 MachineOperand &MO = MI->getOperand(i);
1610 if (MO.isRegister() && MO.getReg() == li.reg)
1619 bool operator()(LiveInterval* A, LiveInterval* B) {
1620 return A->beginNumber() < B->beginNumber();
1625 std::vector<LiveInterval*> LiveIntervals::
1626 addIntervalsForSpillsFast(const LiveInterval &li,
1627 const MachineLoopInfo *loopInfo,
1628 VirtRegMap &vrm, float& SSWeight) {
1629 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1631 std::vector<LiveInterval*> added;
1633 assert(li.weight != HUGE_VALF &&
1634 "attempt to spill already spilled interval!");
1636 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1640 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1644 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1645 while (RI != mri_->reg_end()) {
1646 MachineInstr* MI = &*RI;
1648 SmallVector<unsigned, 2> Indices;
1649 bool HasUse = false;
1650 bool HasDef = false;
1652 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1653 MachineOperand& mop = MI->getOperand(i);
1654 if (!mop.isRegister() || mop.getReg() != li.reg) continue;
1656 HasUse |= MI->getOperand(i).isUse();
1657 HasDef |= MI->getOperand(i).isDef();
1659 Indices.push_back(i);
1662 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1663 Indices, true, slot, li.reg)) {
1664 unsigned NewVReg = mri_->createVirtualRegister(rc);
1666 vrm.assignVirt2StackSlot(NewVReg, slot);
1668 // create a new register for this spill
1669 LiveInterval &nI = getOrCreateInterval(NewVReg);
1671 // the spill weight is now infinity as it
1672 // cannot be spilled again
1673 nI.weight = HUGE_VALF;
1675 // Rewrite register operands to use the new vreg.
1676 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1677 E = Indices.end(); I != E; ++I) {
1678 MI->getOperand(*I).setReg(NewVReg);
1680 if (MI->getOperand(*I).isUse())
1681 MI->getOperand(*I).setIsKill(true);
1684 // Fill in the new live interval.
1685 unsigned index = getInstructionIndex(MI);
1687 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1688 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1691 vrm.addRestorePoint(NewVReg, MI);
1694 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1695 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1698 vrm.addSpillPoint(NewVReg, true, MI);
1701 added.push_back(&nI);
1703 DOUT << "\t\t\t\tadded new interval: ";
1707 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1710 SSWeight += getSpillWeight(true, true, loopDepth);
1712 SSWeight += getSpillWeight(false, true, loopDepth);
1714 SSWeight += getSpillWeight(true, false, loopDepth);
1718 RI = mri_->reg_begin(li.reg);
1721 // Clients expect the new intervals to be returned in sorted order.
1722 std::sort(added.begin(), added.end(), LISorter());
1727 std::vector<LiveInterval*> LiveIntervals::
1728 addIntervalsForSpills(const LiveInterval &li,
1729 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1732 if (EnableFastSpilling)
1733 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1735 assert(li.weight != HUGE_VALF &&
1736 "attempt to spill already spilled interval!");
1738 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1739 li.print(DOUT, tri_);
1742 // Spill slot weight.
1745 // Each bit specifies whether a spill is required in the MBB.
1746 BitVector SpillMBBs(mf_->getNumBlockIDs());
1747 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1748 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1749 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1750 DenseMap<unsigned,unsigned> MBBVRegsMap;
1751 std::vector<LiveInterval*> NewLIs;
1752 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1754 unsigned NumValNums = li.getNumValNums();
1755 SmallVector<MachineInstr*, 4> ReMatDefs;
1756 ReMatDefs.resize(NumValNums, NULL);
1757 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1758 ReMatOrigDefs.resize(NumValNums, NULL);
1759 SmallVector<int, 4> ReMatIds;
1760 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1761 BitVector ReMatDelete(NumValNums);
1762 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1764 // Spilling a split live interval. It cannot be split any further. Also,
1765 // it's guaranteed to be a single val# / range interval.
1766 if (vrm.getPreSplitReg(li.reg)) {
1767 vrm.setIsSplitFromReg(li.reg, 0);
1768 // Unset the split kill marker on the last use.
1769 unsigned KillIdx = vrm.getKillPoint(li.reg);
1771 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1772 assert(KillMI && "Last use disappeared?");
1773 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1774 assert(KillOp != -1 && "Last use disappeared?");
1775 KillMI->getOperand(KillOp).setIsKill(false);
1777 vrm.removeKillPoint(li.reg);
1778 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1779 Slot = vrm.getStackSlot(li.reg);
1780 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1781 MachineInstr *ReMatDefMI = DefIsReMat ?
1782 vrm.getReMaterializedMI(li.reg) : NULL;
1784 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1785 bool isLoad = isLoadSS ||
1786 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1787 bool IsFirstRange = true;
1788 for (LiveInterval::Ranges::const_iterator
1789 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1790 // If this is a split live interval with multiple ranges, it means there
1791 // are two-address instructions that re-defined the value. Only the
1792 // first def can be rematerialized!
1794 // Note ReMatOrigDefMI has already been deleted.
1795 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1796 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1797 false, vrm, rc, ReMatIds, loopInfo,
1798 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1799 MBBVRegsMap, NewLIs, SSWeight);
1801 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1802 Slot, 0, false, false, false,
1803 false, vrm, rc, ReMatIds, loopInfo,
1804 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1805 MBBVRegsMap, NewLIs, SSWeight);
1807 IsFirstRange = false;
1810 SSWeight = 0.0f; // Already accounted for when split.
1811 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1815 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
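// Note (illustrative): splitting at basic block boundaries is only tried
// when -split-intervals-at-bb is enabled and the interval spans more than
// one MBB; the -split-limit check below caps the total number of splits,
// which is mainly useful for debugging.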
1816 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1820 bool NeedStackSlot = false;
1821 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1823 const VNInfo *VNI = *i;
1824 unsigned VN = VNI->id;
1825 unsigned DefIdx = VNI->def;
1827 continue; // Dead val#.
1828 // Is the def for the val# rematerializable?
1829 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1830 ? 0 : getInstructionFromIndex(DefIdx);
1832 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1833 // Remember how to remat the def of this val#.
1834 ReMatOrigDefs[VN] = ReMatDefMI;
1835 // Original def may be modified, so we have to make a copy here.
1836 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1837 ClonedMIs.push_back(Clone);
1838 ReMatDefs[VN] = Clone;
1840 bool CanDelete = true;
1841 if (VNI->hasPHIKill) {
1842 // The value is killed by a phi node, so not all of its uses can be
1843 // rematerialized. The def must not be deleted.
1845 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1847 NeedStackSlot = true;
1850 ReMatDelete.set(VN);
1852 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1854 NeedStackSlot = true;
1858 // One stack slot per live interval.
1859 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1860 Slot = vrm.assignVirt2StackSlot(li.reg);
1862 // Create new intervals and rewrite defs and uses.
1863 for (LiveInterval::Ranges::const_iterator
1864 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1865 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1866 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1867 bool DefIsReMat = ReMatDefMI != NULL;
1868 bool CanDelete = ReMatDelete[I->valno->id];
1870 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1871 bool isLoad = isLoadSS ||
1872 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1873 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1874 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1875 CanDelete, vrm, rc, ReMatIds, loopInfo,
1876 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1877 MBBVRegsMap, NewLIs, SSWeight);
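// Note (assumption): the call above is taken to do the per-range work:
// rewriting each def/use of li.reg to a fresh virtual register, appending
// the resulting intervals to NewLIs, and recording in SpillIdxes /
// RestoreIdxes (keyed by MBB number via SpillMBBs / RestoreMBBs) where
// stores and reloads may still be needed and whether they can be folded.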
1880 // Insert spills / restores if we are splitting.
1882 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1886 SmallPtrSet<LiveInterval*, 4> AddedKill;
1887 SmallVector<unsigned, 2> Ops;
1888 if (NeedStackSlot) {
1889 int Id = SpillMBBs.find_first();
1891 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1892 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1893 std::vector<SRInfo> &spills = SpillIdxes[Id];
1894 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1895 int index = spills[i].index;
1896 unsigned VReg = spills[i].vreg;
1897 LiveInterval &nI = getOrCreateInterval(VReg);
1898 bool isReMat = vrm.isReMaterialized(VReg);
1899 MachineInstr *MI = getInstructionFromIndex(index);
1900 bool CanFold = false;
1901 bool FoundUse = false;
1903 if (spills[i].canFold) {
1905 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1906 MachineOperand &MO = MI->getOperand(j);
1907 if (!MO.isRegister() || MO.getReg() != VReg)
1914 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1915 RestoreMBBs, RestoreIdxes))) {
1916 // MI has two-address uses of the same register. If the use
1917 // isn't the first and only use in the BB, then we can't fold
1918 // it. FIXME: Move this to rewriteInstructionsForSpills.
1925 // Fold the store into the def if possible.
1926 bool Folded = false;
1927 if (CanFold && !Ops.empty()) {
1928 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1931 // Also folded uses, do not issue a load.
1932 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1933 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1935 nI.removeRange(getDefIndex(index), getStoreIndex(index));
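// Note (illustrative): once the store has been folded into MI no separate
// spill instruction will be emitted, so the small [def, store) range that
// was created for it is removed from the new interval (and the reload
// range above as well when the uses were folded too).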
1939 // Otherwise tell the spiller to issue a spill.
1941 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1942 bool isKill = LR->end == getStoreIndex(index);
1943 if (!MI->registerDefIsDead(nI.reg))
1944 // No need to spill a dead def.
1945 vrm.addSpillPoint(VReg, isKill, MI);
1947 AddedKill.insert(&nI);
1950 // Update spill slot weight.
1952 SSWeight += getSpillWeight(true, false, loopDepth);
1954 Id = SpillMBBs.find_next(Id);
1958 int Id = RestoreMBBs.find_first();
1960 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1961 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1963 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1964 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1965 int index = restores[i].index;
1968 unsigned VReg = restores[i].vreg;
1969 LiveInterval &nI = getOrCreateInterval(VReg);
1970 bool isReMat = vrm.isReMaterialized(VReg);
1971 MachineInstr *MI = getInstructionFromIndex(index);
1972 bool CanFold = false;
1974 if (restores[i].canFold) {
1976 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1977 MachineOperand &MO = MI->getOperand(j);
1978 if (!MO.isRegister() || MO.getReg() != VReg)
1982 // If this restore were to be folded, it would have been folded already.
1991 // Fold the load into the use if possible.
1992 bool Folded = false;
1993 if (CanFold && !Ops.empty()) {
1995 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1997 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1999 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2000 // If the rematerializable def is a load, also try to fold it.
2001 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2002 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2003 Ops, isLoadSS, LdSlot, VReg);
2004 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2006 // Re-matting an instruction with virtual register use. Add the
2007 // register as an implicit use on the use MI and update the register
2008 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2010 LiveInterval &ImpLi = getInterval(ImpUse);
2011 ImpLi.weight = HUGE_VALF;
2012 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
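// Note (assumption): the CreateReg operands are read as (reg, isDef=false,
// isImplicit=true), i.e. ImpUse is attached to MI as an implicit use so it
// stays live up to the rematerialization point; raising its interval's
// weight to HUGE_VALF marks it as effectively unspillable.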
2016 // If folding is not possible / failed, then tell the spiller to issue a
2017 // load / rematerialization for us.
2019 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2021 vrm.addRestorePoint(VReg, MI);
2023 // Update spill slot weight.
2025 SSWeight += getSpillWeight(false, true, loopDepth);
2027 Id = RestoreMBBs.find_next(Id);
2030 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2032 std::vector<LiveInterval*> RetNewLIs;
2033 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2034 LiveInterval *LI = NewLIs[i];
2036 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
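// Note (illustrative): the accumulated weight is normalized by a value
// proportional to the interval's length in instructions, turning it into a
// per-instruction density, so a long interval with few uses ends up
// cheaper to spill than a short, heavily used one.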
2037 if (!AddedKill.count(LI)) {
2038 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2039 unsigned LastUseIdx = getBaseIndex(LR->end);
2040 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2041 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2042 assert(UseIdx != -1);
2043 if (LastUse->getOperand(UseIdx).isImplicit() ||
2044 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2045 LastUse->getOperand(UseIdx).setIsKill();
2046 vrm.addKillPoint(LI->reg, LastUseIdx);
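// Note (assumption): intervals in AddedKill already end at a spill store,
// so no extra kill is needed; for the others the last use is marked as a
// kill (unless it is a non-implicit use tied to a def) and recorded in the
// VirtRegMap so later spill code knows where the value dies.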
2049 RetNewLIs.push_back(LI);
2053 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2057 /// hasAllocatableSuperReg - Return true if the specified physical register has
2058 /// any super register that's allocatable.
2059 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2060 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2061 if (allocatableRegs_[*AS] && hasInterval(*AS))
2066 /// getRepresentativeReg - Find the largest super register of the specified
2067 /// physical register.
2068 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2069 // Find the largest super-register that is allocatable.
2070 unsigned BestReg = Reg;
2071 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2072 unsigned SuperReg = *AS;
2073 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2081 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2082 /// specified interval that conflict with the specified physical register.
2083 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2084 unsigned PhysReg) const {
2085 unsigned NumConflicts = 0;
2086 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2087 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2088 E = mri_->reg_end(); I != E; ++I) {
2089 MachineOperand &O = I.getOperand();
2090 MachineInstr *MI = O.getParent();
2091 unsigned Index = getInstructionIndex(MI);
2092 if (pli.liveAt(Index))
2095 return NumConflicts;
2098 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2099 /// around all defs and uses of the specified interval.
2100 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2101 unsigned PhysReg, VirtRegMap &vrm) {
2102 unsigned SpillReg = getRepresentativeReg(PhysReg);
2104 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2105 // If there is a register that aliases PhysReg but is not a sub-register
2106 // of the chosen representative super register, assert, since we can't
2107 // handle that case yet.
2108 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2109 tri_->isSuperRegister(*AS, SpillReg));
2111 LiveInterval &pli = getInterval(SpillReg);
2112 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2113 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2114 E = mri_->reg_end(); I != E; ++I) {
2115 MachineOperand &O = I.getOperand();
2116 MachineInstr *MI = O.getParent();
2117 if (SeenMIs.count(MI))
2120 unsigned Index = getInstructionIndex(MI);
2121 if (pli.liveAt(Index)) {
2122 vrm.addEmergencySpill(SpillReg, MI);
2123 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
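// Note (illustrative): punching the [LOAD, STORE] slots of this
// instruction out of the physreg's interval frees the register across MI;
// the emergency spill recorded above saves and restores it around MI, and
// the loop below trims any live sub-register intervals the same way.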
2124 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2125 if (!hasInterval(*AS))
2127 LiveInterval &spli = getInterval(*AS);
2128 if (spli.liveAt(Index))
2129 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2135 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2136 MachineInstr* startInst) {
2137 LiveInterval& Interval = getOrCreateInterval(reg);
2138 VNInfo* VN = Interval.getNextValue(
2139 getInstructionIndex(startInst) + InstrSlots::DEF,
2140 startInst, getVNInfoAllocator());
2141 VN->hasPHIKill = true;
2142 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2143 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2144 getMBBEndIdx(startInst->getParent()) + 1, VN);
2145 Interval.addRange(LR);
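// Note (assumption): this helper defines reg at startInst's DEF slot and
// keeps it live to the end of the containing block, marking the value as
// killed by a PHI; presumably intended for clients such as PHI elimination
// that need a block-local live-out value.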