1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/STLExtras.h"
41 // Hidden options for help debugging.
// -disable-rematerialization: globally disable remat of defs during spilling.
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: allow splitting live intervals at basic-block
// boundaries (on by default).
45 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
46 cl::init(true), cl::Hidden);
// -split-limit: presumably an upper bound on the number of interval splits;
// -1 means no limit. TODO(review): confirm against the splitting code.
47 static cl::opt<int> SplitLimit("split-limit",
48 cl::init(-1), cl::Hidden);
// -aggressive-remat: fall back to target-independent remat analysis when the
// target does not mark an instruction trivially rematerializable.
50 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// -fast-spill: enable the fast spilling code path (off by default).
52 static cl::opt<bool> EnableFastSpilling("fast-spill",
53 cl::init(false), cl::Hidden);
// Counters reported via -stats.
55 STATISTIC(numIntervals, "Number of original intervals");
56 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration: makes the pass available under the
// command-line name "liveintervals".
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass needs (AliasAnalysis,
/// LiveVariables, PHI elimination, two-address lowering) and the analyses it
/// keeps up to date, so the pass manager schedules and invalidates correctly.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address conversion must run first; this pass
// assumes SSA has already been deconstructed.
70 AU.addPreservedID(PHIEliminationID);
71 AU.addRequiredID(PHIEliminationID);
72 AU.addRequiredID(TwoAddressInstructionPassID);
73 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the interval map, the VNInfo
/// allocator's backing memory, and any cloned instructions we still own.
76 void LiveIntervals::releaseMemory() {
77 // Free the live intervals themselves.
78 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
79 E = r2iMap_.end(); I != E; ++I)
87 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
88 VNInfoAllocator.Reset();
// Cloned instructions were created by this pass and are not in any block;
// delete them through the owning MachineFunction.
89 while (!ClonedMIs.empty()) {
90 MachineInstr *MI = ClonedMIs.back();
92 mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign a slot index to every instruction (and an empty
/// slot before/after each one, InstrSlots::NUM apart), rebuild the
/// index<->instruction and index<->MBB maps, and then remap every existing
/// live range, VNInfo def, and kill index from the old numbering to the new.
96 void LiveIntervals::computeNumbering() {
// Keep copies of the old maps so ranges built against the previous
// numbering can be translated below.
97 Index2MiMap OldI2MI = i2miMap_;
98 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
107 // Number MachineInstrs and MachineBasicBlocks.
108 // Initialize MBB indexes to a sentinel.
109 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
111 unsigned MIIndex = 0;
112 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
114 unsigned StartIdx = MIIndex;
116 // Insert an empty slot at the beginning of each block.
117 MIIndex += InstrSlots::NUM;
118 i2miMap_.push_back(0);
120 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
122 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
123 assert(inserted && "multiple MachineInstr -> index mappings");
124 i2miMap_.push_back(I);
125 MIIndex += InstrSlots::NUM;
128 // Insert an empty slot after every instruction.
129 MIIndex += InstrSlots::NUM;
130 i2miMap_.push_back(0);
133 // Set the MBB2IdxMap entry for this MBB.
134 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
135 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Idx2MBBMap must stay sorted by index so lower_bound lookups work.
137 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
139 if (!OldI2MI.empty())
140 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
141 for (LiveInterval::iterator LI = OI->second->begin(),
142 LE = OI->second->end(); LI != LE; ++LI) {
144 // Remap the start index of the live range to the corresponding new
145 // number, or our best guess at what it _should_ correspond to if the
146 // original instruction has been erased. This is either the following
147 // instruction or its predecessor.
148 unsigned index = LI->start / InstrSlots::NUM;
149 unsigned offset = LI->start % InstrSlots::NUM;
150 if (offset == InstrSlots::LOAD) {
// A LOAD-slot start means the range is live into a block; map it to
// the new start index of that block.
151 std::vector<IdxMBBPair>::const_iterator I =
152 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
153 // Take the pair containing the index
154 std::vector<IdxMBBPair>::const_iterator J =
155 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
157 LI->start = getMBBStartIdx(J->second);
159 LI->start = mi2iMap_[OldI2MI[index]] + offset;
162 // Remap the ending index in the same way that we remapped the start,
163 // except for the final step where we always map to the immediately
164 // following instruction.
165 index = (LI->end - 1) / InstrSlots::NUM;
166 offset = LI->end % InstrSlots::NUM;
167 if (offset == InstrSlots::LOAD) {
168 // VReg dies at end of block.
169 std::vector<IdxMBBPair>::const_iterator I =
170 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
173 LI->end = getMBBEndIdx(I->second) + 1;
175 unsigned idx = index;
// Skip over slots whose instruction was erased; land on the next
// surviving instruction.
176 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
178 if (index != OldI2MI.size())
179 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
181 LI->end = InstrSlots::NUM * i2miMap_.size();
185 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
186 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
189 // Remap the VNInfo def index, which works the same as the
190 // start indices above. VN's with special sentinel defs
191 // don't need to be remapped.
192 if (vni->def != ~0U && vni->def != ~1U) {
193 unsigned index = vni->def / InstrSlots::NUM;
194 unsigned offset = vni->def % InstrSlots::NUM;
195 if (offset == InstrSlots::LOAD) {
196 std::vector<IdxMBBPair>::const_iterator I =
197 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
198 // Take the pair containing the index
199 std::vector<IdxMBBPair>::const_iterator J =
200 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
202 vni->def = getMBBStartIdx(J->second);
204 vni->def = mi2iMap_[OldI2MI[index]] + offset;
208 // Remap the VNInfo kill indices, which works the same as
209 // the end indices above.
210 for (size_t i = 0; i < vni->kills.size(); ++i) {
211 // PHI kills don't need to be remapped.
212 if (!vni->kills[i]) continue;
214 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
215 unsigned offset = vni->kills[i] % InstrSlots::NUM;
216 if (offset == InstrSlots::STORE) {
// A STORE-slot kill marks a value live out of a block; map it to
// the new end index of that block.
217 std::vector<IdxMBBPair>::const_iterator I =
218 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
221 vni->kills[i] = getMBBEndIdx(I->second);
223 unsigned idx = index;
224 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
226 if (index != OldI2MI.size())
227 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
228 (idx == index ? offset : 0);
230 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
237 /// runOnMachineFunction - Compute live intervals for every virtual and
/// physical register in the function. (Despite the old comment, this pass
/// analyzes liveness; it does not itself allocate registers.)
239 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target, register, instruction, alias, and liveness info used by
// the rest of the pass.
241 mri_ = &mf_->getRegInfo();
242 tm_ = &fn.getTarget();
243 tri_ = tm_->getRegisterInfo();
244 tii_ = tm_->getInstrInfo();
245 aa_ = &getAnalysis<AliasAnalysis>();
246 lv_ = &getAnalysis<LiveVariables>();
247 allocatableRegs_ = tri_->getAllocatableSet(fn);
252 numIntervals += getNumIntervals();
// Dump all computed intervals when -debug is enabled.
254 DOUT << "********** INTERVALS **********\n";
255 for (iterator I = begin(), E = end(); I != E; ++I) {
256 I->second->print(DOUT, tri_);
260 numIntervalsAfter += getNumIntervals();
265 /// print - Implement the dump method.
/// Writes every live interval, then every machine instruction prefixed with
/// its slot index, to the given stream.
266 void LiveIntervals::print(std::ostream &O, const Module* ) const {
267 O << "********** INTERVALS **********\n";
268 for (const_iterator I = begin(), E = end(); I != E; ++I) {
269 I->second->print(O, tri_);
273 O << "********** MACHINEINSTRS **********\n";
274 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
275 mbbi != mbbe; ++mbbi) {
// Label each block with the name of its originating IR basic block.
276 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
277 for (MachineBasicBlock::iterator mii = mbbi->begin(),
278 mie = mbbi->end(); mii != mie; ++mii) {
279 O << getInstructionIndex(mii) << '\t' << *mii;
284 /// conflictsWithPhysRegDef - Returns true if the specified register
285 /// is defined during the duration of the specified interval.
286 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
287 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by the interval's ranges.
288 for (LiveInterval::Ranges::const_iterator
289 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
290 for (unsigned index = getBaseIndex(I->start),
291 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
292 index += InstrSlots::NUM) {
293 // skip deleted instructions
294 while (index != end && !getInstructionFromIndex(index))
295 index += InstrSlots::NUM;
296 if (index == end) break;
298 MachineInstr *MI = getInstructionFromIndex(index);
// Copies involving the interval's own register are ignored; the gap
// in the visible source suggests a skip here — TODO confirm.
299 unsigned SrcReg, DstReg;
300 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
301 if (SrcReg == li.reg || DstReg == li.reg)
303 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
304 MachineOperand& mop = MI->getOperand(i);
305 if (!mop.isRegister())
307 unsigned PhysReg = mop.getReg();
308 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual register operands are checked through their assigned
// physical register, if any.
310 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
311 if (!vrm.hasPhys(PhysReg))
313 PhysReg = vrm.getPhys(PhysReg);
315 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// printRegName - Print a register to stderr: its target name if physical,
/// "%reg<N>" if virtual.
324 void LiveIntervals::printRegName(unsigned reg) const {
325 if (TargetRegisterInfo::isPhysicalRegister(reg))
326 cerr << tri_->getName(reg);
328 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by operand MO of instruction mi (at index MIIdx). Handles
/// three cases: the first def of a vreg, a redefinition from two-address
/// lowering, and a redefinition from PHI elimination.
331 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
332 MachineBasicBlock::iterator mi,
333 unsigned MIIdx, MachineOperand& MO,
335 LiveInterval &interval) {
336 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
337 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
// IMPLICIT_DEFs get special handling (body not visible in this view).
339 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
340 DOUT << "is a implicit_def\n";
344 // Virtual registers may be defined multiple times (due to phi
345 // elimination and 2-addr elimination). Much of what we do only has to be
346 // done once for the vreg. We use an empty interval to detect the first
347 // time we see a vreg.
348 if (interval.empty()) {
349 // Get the Idx of the defining instructions.
350 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (if any) so the coalescer can use it.
352 MachineInstr *CopyMI = NULL;
353 unsigned SrcReg, DstReg;
354 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
355 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
356 tii_->isMoveInstr(*mi, SrcReg, DstReg))
358 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
360 assert(ValNo->id == 0 && "First value in interval is not 0?");
362 // Loop over all of the blocks that the vreg is defined in. There are
363 // two cases we have to handle here. The most common case is a vreg
364 // whose lifetime is contained within a basic block. In this case there
365 // will be a single kill, in MBB, which comes after the definition.
366 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
367 // FIXME: what about dead vars?
// If the kill is the def instruction itself, the value is dead at
// its def slot; otherwise it dies just after its last use.
369 if (vi.Kills[0] != mi)
370 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
372 killIdx = defIndex+1;
374 // If the kill happens after the definition, we have an intra-block
376 if (killIdx > defIndex) {
377 assert(vi.AliveBlocks.none() &&
378 "Shouldn't be alive across any blocks!");
379 LiveRange LR(defIndex, killIdx, ValNo);
380 interval.addRange(LR);
381 DOUT << " +" << LR << "\n";
382 interval.addKill(ValNo, killIdx);
387 // The other case we handle is when a virtual register lives to the end
388 // of the defining block, potentially live across some blocks, then is
389 // live into some number of blocks, but gets killed. Start by adding a
390 // range that goes from this definition to the end of the defining block.
391 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
392 DOUT << " +" << NewLR;
393 interval.addRange(NewLR);
395 // Iterate over all of the blocks that the variable is completely
396 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
398 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
399 if (vi.AliveBlocks[i]) {
400 LiveRange LR(getMBBStartIdx(i),
401 getMBBEndIdx(i)+1, // MBB ends at -1.
403 interval.addRange(LR);
408 // Finally, this virtual register is live from the start of any killing
409 // block to the 'use' slot of the killing instruction.
410 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
411 MachineInstr *Kill = vi.Kills[i];
412 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
413 LiveRange LR(getMBBStartIdx(Kill->getParent()),
415 interval.addRange(LR);
416 interval.addKill(ValNo, killIdx);
421 // If this is the second time we see a virtual register definition, it
422 // must be due to phi elimination or two addr elimination. If this is
423 // the result of two address elimination, then the vreg is one of the
424 // def-and-use register operand.
425 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
426 // If this is a two-address definition, then we have already processed
427 // the live range. The only problem is that we didn't realize there
428 // are actually two values in the live interval. Because of this we
429 // need to take the LiveRegion that defines this register and split it
431 assert(interval.containsOneValue());
432 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
433 unsigned RedefIndex = getDefIndex(MIIdx);
435 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
436 VNInfo *OldValNo = OldLR->valno;
438 // Delete the initial value, which should be short and continuous,
439 // because the 2-addr copy must be in the same MBB as the redef.
440 interval.removeRange(DefIndex, RedefIndex);
442 // Two-address vregs should always only be redefined once. This means
443 // that at this point, there should be exactly one value number in it.
444 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
446 // The new value number (#1) is defined by the instruction we claimed
448 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
451 // Value#0 is now defined by the 2-addr instruction.
452 OldValNo->def = RedefIndex;
455 // Add the new live interval which replaces the range for the input copy.
456 LiveRange LR(DefIndex, RedefIndex, ValNo);
457 DOUT << " replace range with " << LR;
458 interval.addRange(LR);
459 interval.addKill(ValNo, RedefIndex);
461 // If this redefinition is dead, we need to add a dummy unit live
462 // range covering the def slot.
464 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
467 interval.print(DOUT, tri_);
470 // Otherwise, this must be because of phi elimination. If this is the
471 // first redefinition of the vreg that we have seen, go back and change
472 // the live range in the PHI block to be a different value number.
473 if (interval.containsOneValue()) {
474 assert(vi.Kills.size() == 1 &&
475 "PHI elimination vreg should have one kill, the PHI itself!");
477 // Remove the old range that we now know has an incorrect number.
478 VNInfo *VNI = interval.getValNumInfo(0);
479 MachineInstr *Killer = vi.Kills[0];
480 unsigned Start = getMBBStartIdx(Killer->getParent());
481 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
482 DOUT << " Removing [" << Start << "," << End << "] from: ";
483 interval.print(DOUT, tri_); DOUT << "\n";
484 interval.removeRange(Start, End);
485 VNI->hasPHIKill = true;
486 DOUT << " RESULT: "; interval.print(DOUT, tri_);
488 // Replace the interval with one of a NEW value number. Note that this
489 // value number isn't actually defined by an instruction, weird huh? :)
490 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
491 DOUT << " replace range with " << LR;
492 interval.addRange(LR);
493 interval.addKill(LR.valno, End);
494 DOUT << " RESULT: "; interval.print(DOUT, tri_);
497 // In the case of PHI elimination, each variable definition is only
498 // live until the end of the block. We've already taken care of the
499 // rest of the live range.
500 unsigned defIndex = getDefIndex(MIIdx);
// As with the first-def case, record the defining copy if this def is
// a move or subreg insert/extract.
503 MachineInstr *CopyMI = NULL;
504 unsigned SrcReg, DstReg;
505 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
506 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
507 tii_->isMoveInstr(*mi, SrcReg, DstReg))
509 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
511 unsigned killIndex = getMBBEndIdx(mbb) + 1;
512 LiveRange LR(defIndex, killIndex, ValNo);
513 interval.addRange(LR);
514 interval.addKill(ValNo, killIndex);
515 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Add a live range for a physical-register def
/// at MIIdx. Physreg intervals never cross block boundaries, so the range
/// ends at the kill, at a redefinition, or (if dead) one slot after the def.
/// CopyMI, if non-null, is the copy instruction that produced the value.
523 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
524 MachineBasicBlock::iterator mi,
527 LiveInterval &interval,
528 MachineInstr *CopyMI) {
529 // A physical register cannot be live across basic block, so its
530 // lifetime must end somewhere in its defining basic block.
531 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
533 unsigned baseIndex = MIIdx;
534 unsigned start = getDefIndex(baseIndex);
535 unsigned end = start;
537 // If it is not used after definition, it is considered dead at
538 // the instruction defining it. Hence its interval is:
539 // [defSlot(def), defSlot(def)+1)
542 end = getDefIndex(start) + 1;
546 // If it is not dead on definition, it must be killed by a
547 // subsequent instruction. Hence its interval is:
548 // [defSlot(def), useSlot(kill)+1)
549 baseIndex += InstrSlots::NUM;
550 while (++mi != MBB->end()) {
// Skip index slots belonging to erased instructions so baseIndex
// stays in sync with mi.
551 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
552 getInstructionFromIndex(baseIndex) == 0)
553 baseIndex += InstrSlots::NUM;
554 if (mi->killsRegister(interval.reg, tri_)) {
556 end = getUseIndex(baseIndex) + 1;
558 } else if (mi->modifiesRegister(interval.reg, tri_)) {
559 // Another instruction redefines the register before it is ever read.
560 // Then the register is essentially dead at the instruction that defines
561 // it. Hence its interval is:
562 // [defSlot(def), defSlot(def)+1)
564 end = getDefIndex(start) + 1;
568 baseIndex += InstrSlots::NUM;
571 // The only case we should have a dead physreg here without a killing or
572 // instruction where we know it's dead is if it is live-in to the function
574 assert(!CopyMI && "physreg was not killed in defining block!");
575 end = getDefIndex(start) + 1; // It's dead.
578 assert(start < end && "did not find end of interval?");
580 // Already exists? Extend old live interval.
581 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
582 VNInfo *ValNo = (OldLR != interval.end())
583 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
584 LiveRange LR(start, end, ValNo);
585 interval.addRange(LR);
586 interval.addKill(LR.valno, end);
587 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For allocatable physregs, also processes each
/// sub-register, since a def of a register defines its sub-registers too.
590 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
591 MachineBasicBlock::iterator MI,
595 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
596 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
597 getOrCreateInterval(MO.getReg()));
598 else if (allocatableRegs_[MO.getReg()]) {
// Track the defining copy so downstream coalescing can see it.
599 MachineInstr *CopyMI = NULL;
600 unsigned SrcReg, DstReg;
601 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
602 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
603 tii_->isMoveInstr(*MI, SrcReg, DstReg))
605 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
606 getOrCreateInterval(MO.getReg()), CopyMI);
607 // Def of a register also defines its sub-registers.
608 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
609 // If MI also modifies the sub-register explicitly, avoid processing it
610 // more than once. Do not pass in TRI here so it checks for exact match.
611 if (!MI->modifiesRegister(*AS))
612 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
613 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create the live range for a register that is
/// live-in to a block: from the block start until its first kill, first
/// redefinition, or (if never touched) through the whole block.
617 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
619 LiveInterval &interval, bool isAlias) {
620 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
622 // Look for kills, if it reaches a def before it's killed, then it shouldn't
623 // be considered a livein.
624 MachineBasicBlock::iterator mi = MBB->begin();
625 unsigned baseIndex = MIIdx;
626 unsigned start = baseIndex;
// Advance past index slots whose instructions have been erased.
627 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
628 getInstructionFromIndex(baseIndex) == 0)
629 baseIndex += InstrSlots::NUM;
630 unsigned end = baseIndex;
632 while (mi != MBB->end()) {
633 if (mi->killsRegister(interval.reg, tri_)) {
635 end = getUseIndex(baseIndex) + 1;
637 } else if (mi->modifiesRegister(interval.reg, tri_)) {
638 // Another instruction redefines the register before it is ever read.
639 // Then the register is essentially dead at the instruction that defines
640 // it. Hence its interval is:
641 // [defSlot(def), defSlot(def)+1)
643 end = getDefIndex(start) + 1;
647 baseIndex += InstrSlots::NUM;
648 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
649 getInstructionFromIndex(baseIndex) == 0)
650 baseIndex += InstrSlots::NUM;
655 // Live-in register might not be used at all.
659 end = getDefIndex(MIIdx) + 1;
661 DOUT << " live through";
// The live-in value has no defining instruction in this function, hence
// the ~0U sentinel def index.
666 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
667 interval.addRange(LR);
668 interval.addKill(LR.valno, end);
669 DOUT << " +" << LR << '\n';
672 /// computeIntervals - computes the live intervals for virtual
673 /// registers. for some ordering of the machine instructions [1,N] a
674 /// live interval is an interval [i, j) where 1 <= i <= j < N for
675 /// which a variable is live
676 void LiveIntervals::computeIntervals() {
677 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
678 << "********** Function: "
679 << ((Value*)mf_->getFunction())->getName() << '\n';
680 // Track the index of the current machine instr.
681 unsigned MIIndex = 0;
683 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
685 MachineBasicBlock *MBB = MBBI;
686 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
688 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
690 // Create intervals for live-ins to this BB first.
691 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
692 LE = MBB->livein_end(); LI != LE; ++LI) {
693 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
694 // Multiple live-ins can alias the same register.
695 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
696 if (!hasInterval(*AS))
697 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
701 // Skip over empty initial indices.
702 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
703 getInstructionFromIndex(MIIndex) == 0)
704 MIIndex += InstrSlots::NUM;
706 for (; MI != miEnd; ++MI) {
707 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit defs are seen before
// explicit ones. TODO(review): confirm the reason for the reverse walk.
710 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
711 MachineOperand &MO = MI->getOperand(i);
712 // handle register defs - build intervals
713 if (MO.isRegister() && MO.getReg() && MO.isDef())
714 handleRegisterDef(MBB, MI, MIIndex, MO, i);
717 MIIndex += InstrSlots::NUM;
719 // Skip over empty indices.
720 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
721 getInstructionFromIndex(MIIndex) == 0)
722 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs every basic block whose start index
/// falls inside live range LR, using binary search over the sorted
/// Idx2MBBMap.
727 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
728 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
729 std::vector<IdxMBBPair>::const_iterator I =
730 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
733 while (I != Idx2MBBMap.end()) {
// Stop once the block starts at or past the end of the range.
734 if (LR.end <= I->first)
736 MBBs.push_back(I->second);
/// createInterval - Allocate a new LiveInterval for the given register, with
/// an initial spill weight that depends on whether it is physical or virtual.
744 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
745 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
747 return new LiveInterval(reg, Weight);
750 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
751 /// copy field and returns the source register that defines it.
752 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG's source is operand 1; INSERT_SUBREG's inserted value is
// operand 2; plain moves are decoded by the target hook.
756 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
757 return VNI->copy->getOperand(1).getReg();
758 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
759 return VNI->copy->getOperand(2).getReg();
760 unsigned SrcReg, DstReg;
761 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
// Reaching here means the copy field held an instruction kind we do not
// understand; this is a hard error.
763 assert(0 && "Unrecognized copy instruction!");
767 //===----------------------------------------------------------------------===//
768 // Register allocator hooks.
771 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
772 /// allow one) virtual register operand, then its uses are implicitly using
773 /// the register. Returns the virtual register.
774 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
775 MachineInstr *MI) const {
777 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
778 MachineOperand &MO = MI->getOperand(i);
779 if (!MO.isRegister() || !MO.isUse())
// Skip operands that are not registers, the zero register, or the
// interval's own register.
781 unsigned Reg = MO.getReg();
782 if (Reg == 0 || Reg == li.reg)
784 // FIXME: For now, only remat MI with at most one register operand.
786 "Can't rematerialize instruction with multiple register operand!");
795 /// isValNoAvailableAt - Return true if the val# of the specified interval
796 /// which reaches the given instruction also reaches the specified use index.
797 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
798 unsigned UseIdx) const {
799 unsigned Index = getInstructionIndex(MI);
// NOTE(review): FindLiveRangeContaining(Index) is dereferenced without a
// null/end check — callers must guarantee MI lies inside li.
800 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
801 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
802 return UI != li.end() && UI->valno == ValNo;
805 /// isReMaterializable - Returns true if the definition MI of the specified
806 /// val# of the specified interval is re-materializable.
807 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
808 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
813 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
817 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
818 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
819 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
820 // this but remember this is not safe to fold into a two-address
822 // This is a load from fixed stack slot. It can be rematerialized.
825 // If the target-specific rules don't identify an instruction as
826 // being trivially rematerializable, use some target-independent
828 if (!MI->getDesc().isRematerializable() ||
829 !tii_->isTriviallyReMaterializable(MI)) {
// The aggressive path below is gated by the -aggressive-remat flag.
830 if (!EnableAggressiveRemat)
833 // If the instruction accesses memory but the memoperands have been lost,
834 // we can't analyze it.
835 const TargetInstrDesc &TID = MI->getDesc();
836 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
839 // Avoid instructions obviously unsafe for remat.
840 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
843 // If the instruction accesses memory and the memory could be non-constant,
844 // assume the instruction is not rematerializable.
845 for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
846 E = MI->memoperands_end(); I != E; ++I) {
847 const MachineMemOperand &MMO = *I;
848 if (MMO.isVolatile() || MMO.isStore())
850 const Value *V = MMO.getValue();
// Pseudo source values (constant pool, fixed stack, etc.) can declare
// themselves constant; everything else is queried via alias analysis.
853 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
854 if (!PSV->isConstant(mf_->getFrameInfo()))
856 } else if (!aa_->pointsToConstantMemory(V))
860 // If any of the registers accessed are non-constant, conservatively assume
861 // the instruction is not rematerializable.
863 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
864 const MachineOperand &MO = MI->getOperand(i);
865 if (MO.isRegister()) {
866 unsigned Reg = MO.getReg();
869 if (TargetRegisterInfo::isPhysicalRegister(Reg))
872 // Only allow one def, and that in the first operand.
873 if (MO.isDef() != (i == 0))
876 // Only allow constant-valued registers.
877 bool IsLiveIn = mri_->isLiveIn(Reg);
878 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
881 // For the def, it should be the only def.
882 if (MO.isDef() && (next(I) != E || IsLiveIn))
886 // Only allow one use other register use, as that's all the
887 // remat mechanisms support currently.
891 else if (Reg != ImpUse)
894 // For uses, there should be only one associate def.
895 if (I != E && (next(I) != E || IsLiveIn))
// Finally, if remat relies on an implicitly-used vreg, that vreg's value
// must be available at every use of li that this val# reaches.
902 unsigned ImpUse = getReMatImplicitUse(li, MI);
904 const LiveInterval &ImpLi = getInterval(ImpUse);
905 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
906 re = mri_->use_end(); ri != re; ++ri) {
907 MachineInstr *UseMI = &*ri;
908 unsigned UseIdx = getInstructionIndex(UseMI);
909 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
911 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
918 /// isReMaterializable - Returns true if every definition of MI of every
919 /// val# of the specified interval is re-materializable.
920 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
922 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
924 const VNInfo *VNI = *i;
925 unsigned DefIdx = VNI->def;
927 continue; // Dead val#.
928 // Is the def for the val# rematerializable?
931 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
// Delegates to the per-definition overload; isLoad is presumably
// accumulated across val#s in lines not visible here — confirm.
932 bool DefIsLoad = false;
934 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
941 /// FilterFoldedOps - Filter out two-address use operands. Return
942 /// true if it finds any issue with the operands that ought to prevent
944 static bool FilterFoldedOps(MachineInstr *MI,
945 SmallVector<unsigned, 2> &Ops,
947 SmallVector<unsigned, 2> &FoldOps) {
948 const TargetInstrDesc &TID = MI->getDesc();
951 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
952 unsigned OpIdx = Ops[i];
953 MachineOperand &MO = MI->getOperand(OpIdx);
954 // FIXME: fold subreg use.
// Defs contribute a "mod" to MRInfo; uses contribute a "ref".
958 MRInfo |= (unsigned)VirtRegMap::isMod;
960 // Filter out two-address use operand(s).
961 if (!MO.isImplicit() &&
962 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// A tied use is both read and written; record modref and do not add
// it to the foldable set.
963 MRInfo = VirtRegMap::isModRef;
966 MRInfo |= (unsigned)VirtRegMap::isRef;
968 FoldOps.push_back(OpIdx);
974 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
975 /// slot / to reg or any rematerialized load into ith operand of specified
976 /// MI. If it is successul, MI is updated with the newly created MI and
978 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
979 VirtRegMap &vrm, MachineInstr *DefMI,
981 SmallVector<unsigned, 2> &Ops,
982 bool isSS, int Slot, unsigned Reg) {
983 // If it is an implicit def instruction, just delete it.
984 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
985 RemoveMachineInstrFromMaps(MI);
986 vrm.RemoveMachineInstrFromMaps(MI);
987 MI->eraseFromParent();
992 // Filter the list of operand indexes that are to be folded. Abort if
993 // any operand will prevent folding.
995 SmallVector<unsigned, 2> FoldOps;
996 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
999 // The only time it's safe to fold into a two address instruction is when
1000 // it's folding reload and spill from / into a spill stack slot.
1001 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either a stack-slot reference (isSS) or the remat def itself.
1004 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1005 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1007 // Remember this instruction uses the spill slot.
1008 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1010 // Attempt to fold the memory reference into the instruction. If
1011 // we can do this, we don't need to insert spill code.
1012 MachineBasicBlock &MBB = *MI->getParent();
1013 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1014 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all bookkeeping from the old instruction to the folded one.
1015 vrm.transferSpillPts(MI, fmi);
1016 vrm.transferRestorePts(MI, fmi);
1017 vrm.transferEmergencySpills(MI, fmi);
// The folded instruction inherits the old instruction's slot index.
1019 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1020 mi2iMap_[fmi] = InstrIdx;
1021 MI = MBB.insert(MBB.erase(MI), fmi);
1028 /// canFoldMemoryOperand - Returns true if the specified load / store
1029 /// folding is possible.
1030 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1031 SmallVector<unsigned, 2> &Ops,
// NOTE(review): a ReMat flag is tested below but is not visible in this
// signature - the intervening line appears elided. TODO confirm.
1033 // Filter the list of operand indexes that are to be folded. Abort if
1034 // any operand will prevent folding.
1035 unsigned MRInfo = 0;
1036 SmallVector<unsigned, 2> FoldOps;
1037 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1040 // It's only legal to remat for a use, not a def.
1041 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final decision is delegated to the target hook.
1044 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Check whether all live ranges of li stay inside one
// basic block. (The return statements appear elided from this view.)
1047 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1048 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1049 for (LiveInterval::Ranges::const_iterator
1050 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Binary search the sorted index->MBB map for the block containing the
// start of this live range.
1051 std::vector<IdxMBBPair>::const_iterator II =
1052 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1053 if (II == Idx2MBBMap.end())
1055 if (I->end > II->first) // crossing a MBB.
1057 MBBs.insert(II->second);
// Two distinct blocks seen => the interval spans multiple MBBs.
1058 if (MBBs.size() > 1)
1064 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1065 /// interval on to-be re-materialized operands of MI) with new register.
1066 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1067 MachineInstr *MI, unsigned NewVReg,
// NOTE(review): a VirtRegMap &vrm parameter is used below but is not
// visible here - the intervening line appears elided. TODO confirm.
1069 // There is an implicit use. That means one of the other operand is
1070 // being remat'ed and the remat'ed instruction has li.reg as an
1071 // use operand. Make sure we rewrite that as well.
1072 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1073 MachineOperand &MO = MI->getOperand(i);
1074 if (!MO.isRegister())
1076 unsigned Reg = MO.getReg();
// Only virtual registers can have a rematerialized def recorded in vrm.
1077 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1079 if (!vrm.isReMaterialized(Reg))
// Rewrite the use of li.reg inside the recorded remat instruction so it
// refers to the replacement vreg.
1081 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1082 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1084 UseMO->setReg(NewVReg);
1088 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1089 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): a VirtRegMap &vrm parameter is used throughout this body but
// is not visible in the signature below - the intervening line appears
// elided. Several statements inside the body are also elided. TODO confirm.
1090 bool LiveIntervals::
1091 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1092 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1093 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1094 unsigned Slot, int LdSlot,
1095 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1097 const TargetRegisterClass* rc,
1098 SmallVector<int, 4> &ReMatIds,
1099 const MachineLoopInfo *loopInfo,
1100 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1101 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1102 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1103 MachineBasicBlock *MBB = MI->getParent();
1104 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1105 bool CanFold = false;
// Walk every operand of MI looking for references to the spilled register.
1107 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1108 MachineOperand& mop = MI->getOperand(i);
1109 if (!mop.isRegister())
1111 unsigned Reg = mop.getReg();
1112 unsigned RegI = Reg;
1113 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1118 bool TryFold = !DefIsReMat;
1119 bool FoldSS = true; // Default behavior unless it's a remat.
1120 int FoldSlot = Slot;
1122 // If this is the rematerializable definition MI itself and
1123 // all of its uses are rematerialized, simply delete it.
1124 if (MI == ReMatOrigDefMI && CanDelete) {
1125 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1127 RemoveMachineInstrFromMaps(MI);
1128 vrm.RemoveMachineInstrFromMaps(MI);
1129 MI->eraseFromParent();
1133 // If def for this use can't be rematerialized, then try folding.
1134 // If def is rematerializable and it's a load, also try folding.
1135 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1137 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1143 // Scan all of the operands of this instruction rewriting operands
1144 // to use NewVReg instead of li.reg as appropriate. We do this for
1147 // 1. If the instr reads the same spilled vreg multiple times, we
1148 // want to reuse the NewVReg.
1149 // 2. If the instr is a two-addr instruction, we are required to
1150 // keep the src/dst regs pinned.
1152 // Keep track of whether we replace a use and/or def so that we can
1153 // create the spill interval with the appropriate range.
1155 HasUse = mop.isUse();
1156 HasDef = mop.isDef();
1157 SmallVector<unsigned, 2> Ops;
// Gather all later operands referring to the same register so they can be
// rewritten together and considered for folding as a group.
1159 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1160 const MachineOperand &MOj = MI->getOperand(j);
1161 if (!MOj.isRegister())
1163 unsigned RegJ = MOj.getReg();
1164 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1168 HasUse |= MOj.isUse();
1169 HasDef |= MOj.isDef();
1173 if (HasUse && !li.liveAt(getUseIndex(index)))
1174 // Must be defined by an implicit def. It should not be spilled. Note,
1175 // this is for correctness reason. e.g.
1176 // 8   %reg1024<def> = IMPLICIT_DEF
1177 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1178 // The live range [12, 14) are not part of the r1024 live interval since
1179 // it's defined by an implicit def. It will not conflicts with live
1180 // interval of r1025. Now suppose both registers are spilled, you can
1181 // easily see a situation where both registers are reloaded before
1182 // the INSERT_SUBREG and both target registers that would overlap.
1185 // Update stack slot spill weight if we are splitting.
1186 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1193 // Do not fold load / store here if we are splitting. We'll find an
1194 // optimal point to insert a load / store later.
1196 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1197 Ops, FoldSS, FoldSlot, Reg)) {
1198 // Folding the load/store can completely change the instruction in
1199 // unpredictable ways, rescan it from the beginning.
1203 if (isRemoved(MI)) {
1207 goto RestartInstruction;
1210 // We'll try to fold it later if it's profitable.
1211 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1215 // Create a new virtual register for the spill interval.
1216 bool CreatedNewVReg = false;
1218 NewVReg = mri_->createVirtualRegister(rc);
1220 CreatedNewVReg = true;
// Rewrite this operand (and, below, all grouped operands) to the new vreg.
1222 mop.setReg(NewVReg);
1223 if (mop.isImplicit())
1224 rewriteImplicitOps(li, MI, NewVReg, vrm);
1226 // Reuse NewVReg for other reads.
1227 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1228 MachineOperand &mopj = MI->getOperand(Ops[j]);
1229 mopj.setReg(NewVReg);
1230 if (mopj.isImplicit())
1231 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot assignments for the newly created vreg.
1234 if (CreatedNewVReg) {
1236 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1237 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1238 // Each valnum may have its own remat id.
1239 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1241 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1243 if (!CanDelete || (HasUse && HasDef)) {
1244 // If this is a two-addr instruction then its use operands are
1245 // rematerializable but its def is not. It should be assigned a
1247 vrm.assignVirt2StackSlot(NewVReg, Slot);
1250 vrm.assignVirt2StackSlot(NewVReg, Slot);
1252 } else if (HasUse && HasDef &&
1253 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1254 // If this interval hasn't been assigned a stack slot (because earlier
1255 // def is a deleted remat def), do it now.
1256 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1257 vrm.assignVirt2StackSlot(NewVReg, Slot);
1260 // Re-matting an instruction with virtual register use. Add the
1261 // register as an implicit use on the use MI.
1262 if (DefIsReMat && ImpUse)
1263 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1265 // create a new register interval for this spill / remat.
1266 LiveInterval &nI = getOrCreateInterval(NewVReg);
1267 if (CreatedNewVReg) {
1268 NewLIs.push_back(&nI);
1269 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1271 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend or create live ranges on the new interval covering this use/def.
1275 if (CreatedNewVReg) {
1276 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1277 nI.getNextValue(~0U, 0, VNInfoAllocator));
1281 // Extend the split live interval to this def / use.
1282 unsigned End = getUseIndex(index)+1;
1283 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1284 nI.getValNumInfo(nI.getNumValNums()-1));
1290 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1291 nI.getNextValue(~0U, 0, VNInfoAllocator));
1296 DOUT << "\t\t\t\tAdded new interval: ";
1297 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Whether any kill of value number VNI falls inside
// MBB strictly after index Idx.
1302 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
// NOTE(review): the VNInfo *VNI parameter used below is not visible here -
// the intervening line appears elided. TODO confirm.
1304 MachineBasicBlock *MBB, unsigned Idx) const {
1305 unsigned End = getMBBEndIdx(MBB);
// Scan the value number's recorded kill indices for one inside (Idx, End).
1306 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1307 unsigned KillIdx = VNI->kills[j];
1308 if (KillIdx > Idx && KillIdx < End)
1314 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1315 /// during spilling.
1317 struct RewriteInfo {
// Field declarations (Index, MI, HasUse, HasDef) appear elided from this
// view; the constructor below initializes all four. TODO confirm.
1322 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1323 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Comparator giving a strict weak ordering by instruction index, used with
// std::sort in rewriteInstructionsForSpills.
1326 struct RewriteInfoCompare {
1327 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1328 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li within one
// live range: collect them sorted by instruction index, rewrite each via
// rewriteInstructionForSpills, and record per-MBB spill / restore points.
// NOTE(review): a VirtRegMap &vrm parameter is used in this body but is not
// visible in the signature - the intervening line appears elided. Several
// statements inside the body are also elided. TODO confirm.
1333 void LiveIntervals::
1334 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1335 LiveInterval::Ranges::const_iterator &I,
1336 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1337 unsigned Slot, int LdSlot,
1338 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1340 const TargetRegisterClass* rc,
1341 SmallVector<int, 4> &ReMatIds,
1342 const MachineLoopInfo *loopInfo,
1343 BitVector &SpillMBBs,
1344 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1345 BitVector &RestoreMBBs,
1346 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1347 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1348 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1349 bool AllCanFold = true;
1350 unsigned NewVReg = 0;
1351 unsigned start = getBaseIndex(I->start);
1352 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1354 // First collect all the def / use in this live range that will be rewritten.
1355 // Make sure they are sorted according to instruction index.
1356 std::vector<RewriteInfo> RewriteMIs;
1357 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1358 re = mri_->reg_end(); ri != re; ) {
1359 MachineInstr *MI = &*ri;
1360 MachineOperand &O = ri.getOperand();
1362 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1363 unsigned index = getInstructionIndex(MI);
// Only operands whose instruction index lies inside this live range matter.
1364 if (index < start || index >= end)
1366 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1367 // Must be defined by an implicit def. It should not be spilled. Note,
1368 // this is for correctness reason. e.g.
1369 // 8   %reg1024<def> = IMPLICIT_DEF
1370 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1371 // The live range [12, 14) are not part of the r1024 live interval since
1372 // it's defined by an implicit def. It will not conflicts with live
1373 // interval of r1025. Now suppose both registers are spilled, you can
1374 // easily see a situation where both registers are reloaded before
1375 // the INSERT_SUBREG and both target registers that would overlap.
1377 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1379 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1381 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1382 // Now rewrite the defs and uses.
1383 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1384 RewriteInfo &rwi = RewriteMIs[i];
1386 unsigned index = rwi.Index;
1387 bool MIHasUse = rwi.HasUse;
1388 bool MIHasDef = rwi.HasDef;
1389 MachineInstr *MI = rwi.MI;
1390 // If MI def and/or use the same register multiple times, then there
1391 // are multiple entries.
1392 unsigned NumUses = MIHasUse;
// Coalesce consecutive entries for the same instruction into one rewrite.
1393 while (i != e && RewriteMIs[i].MI == MI) {
1394 assert(RewriteMIs[i].Index == index);
1395 bool isUse = RewriteMIs[i].HasUse;
1396 if (isUse) ++NumUses;
1398 MIHasDef |= RewriteMIs[i].HasDef;
1401 MachineBasicBlock *MBB = MI->getParent();
1403 if (ImpUse && MI != ReMatDefMI) {
1404 // Re-matting an instruction with virtual register use. Update the
1405 // register interval's spill weight to HUGE_VALF to prevent it from
1407 LiveInterval &ImpLi = getInterval(ImpUse);
1408 ImpLi.weight = HUGE_VALF;
// Reuse a vreg already created for this MBB when splitting, if any.
1411 unsigned MBBId = MBB->getNumber();
1412 unsigned ThisVReg = 0;
1414 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1415 if (NVI != MBBVRegsMap.end()) {
1416 ThisVReg = NVI->second;
1423 // It's better to start a new interval to avoid artifically
1424 // extend the new interval.
1425 if (MIHasDef && !MIHasUse) {
1426 MBBVRegsMap.erase(MBB->getNumber());
1432 bool IsNew = ThisVReg == 0;
1434 // This ends the previous live interval. If all of its def / use
1435 // can be folded, give it a low spill weight.
1436 if (NewVReg && TrySplit && AllCanFold) {
1437 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef/HasUse report what was replaced.
1444 bool HasDef = false;
1445 bool HasUse = false;
1446 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1447 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1448 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1449 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1450 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight)\u003b
1451 if (!HasDef && !HasUse)
1454 AllCanFold &= CanFold;
1456 // Update weight of spill interval.
1457 LiveInterval &nI = getOrCreateInterval(NewVReg);
1459 // The spill weight is now infinity as it cannot be spilled again.
1460 nI.weight = HUGE_VALF;
1464 // Keep track of the last def and first use in each MBB.
1466 if (MI != ReMatOrigDefMI || !CanDelete) {
1467 bool HasKill = false;
1469 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1471 // If this is a two-address code, then this index starts a new VNInfo.
1472 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1474 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record / merge a spill point for this MBB.
1476 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1477 SpillIdxes.find(MBBId);
1479 if (SII == SpillIdxes.end()) {
1480 std::vector<SRInfo> S;
1481 S.push_back(SRInfo(index, NewVReg, true));
1482 SpillIdxes.insert(std::make_pair(MBBId, S));
1483 } else if (SII->second.back().vreg != NewVReg) {
1484 SII->second.push_back(SRInfo(index, NewVReg, true));
1485 } else if ((int)index > SII->second.back().index) {
1486 // If there is an earlier def and this is a two-address
1487 // instruction, then it's not possible to fold the store (which
1488 // would also fold the load).
1489 SRInfo &Info = SII->second.back();
1491 Info.canFold = !HasUse;
1493 SpillMBBs.set(MBBId);
1494 } else if (SII != SpillIdxes.end() &&
1495 SII->second.back().vreg == NewVReg &&
1496 (int)index > SII->second.back().index) {
1497 // There is an earlier def that's not killed (must be two-address).
1498 // The spill is no longer needed.
1499 SII->second.pop_back();
1500 if (SII->second.empty()) {
1501 SpillIdxes.erase(MBBId);
1502 SpillMBBs.reset(MBBId);
// Record / merge a restore point for this MBB.
1509 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1510 SpillIdxes.find(MBBId);
1511 if (SII != SpillIdxes.end() &&
1512 SII->second.back().vreg == NewVReg &&
1513 (int)index > SII->second.back().index)
1514 // Use(s) following the last def, it's not safe to fold the spill.
1515 SII->second.back().canFold = false;
1516 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1517 RestoreIdxes.find(MBBId);
1518 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1519 // If we are splitting live intervals, only fold if it's the first
1520 // use and there isn't another use later in the MBB.
1521 RII->second.back().canFold = false;
1523 // Only need a reload if there isn't an earlier def / use.
1524 if (RII == RestoreIdxes.end()) {
1525 std::vector<SRInfo> Infos;
1526 Infos.push_back(SRInfo(index, NewVReg, true));
1527 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1529 RII->second.push_back(SRInfo(index, NewVReg, true));
1531 RestoreMBBs.set(MBBId);
1535 // Update spill weight.
1536 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1537 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1540 if (NewVReg && TrySplit && AllCanFold) {
1541 // If all of its def / use can be folded, give it a low spill weight.
1542 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - True if the restore recorded at instruction `index` in
// block `Id` for vreg `vr` is marked foldable.
1547 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1548 BitVector &RestoreMBBs,
1549 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this block at all.
1550 if (!RestoreMBBs[Id])
1552 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan for an exact (index, vreg) match that is still foldable.
1553 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1554 if (Restores[i].index == index &&
1555 Restores[i].vreg == vr &&
1556 Restores[i].canFold)
1561 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1562 BitVector &RestoreMBBs,
1563 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1564 if (!RestoreMBBs[Id])
1566 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1567 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1568 if (Restores[i].index == index && Restores[i].vreg)
1569 Restores[i].index = -1;
1572 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1573 /// spilled and create empty intervals for their uses.
// NOTE(review): the return-type line of this definition appears elided from
// this view (upstream convention suggests void). TODO confirm.
1575 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1576 const TargetRegisterClass* rc,
1577 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining def / use of li.reg; by this point all ordinary
// references have been rewritten, so what's left stems from IMPLICIT_DEFs.
1578 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1579 re = mri_->reg_end(); ri != re; ) {
1580 MachineOperand &O = ri.getOperand();
1581 MachineInstr *MI = &*ri;
1584 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1585 "Register def was not rewritten?");
// Drop the IMPLICIT_DEF from the index maps, VirtRegMap, and the block.
1586 RemoveMachineInstrFromMaps(MI);
1587 vrm.RemoveMachineInstrFromMaps(MI);
1588 MI->eraseFromParent();
1590 // This must be an use of an implicit_def so it's not part of the live
1591 // interval. Create a new empty live interval for it.
1592 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1593 unsigned NewVReg = mri_->createVirtualRegister(rc);
1595 vrm.setIsImplicitlyDefined(NewVReg);
1596 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1597 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1598 MachineOperand &MO = MI->getOperand(i);
1599 if (MO.isRegister() && MO.getReg() == li.reg)
// Orders live intervals by ascending start index; used with std::sort to
// return new intervals to clients in sorted order. (The enclosing struct's
// header line appears elided from this view.)
1608 bool operator()(LiveInterval* A, LiveInterval* B) {
1609 return A->beginNumber() < B->beginNumber();
// addIntervalsForSpillsFast - Simpler spilling path (enabled by -fast-spill):
// assign one stack slot, then for each instruction touching li.reg either
// fold the memory access or create a fresh vreg with a tiny local interval.
// NOTE(review): several statements in this body appear elided from this view.
1614 std::vector<LiveInterval*> LiveIntervals::
1615 addIntervalsForSpillsFast(const LiveInterval &li,
1616 const MachineLoopInfo *loopInfo,
1617 VirtRegMap &vrm, float& SSWeight) {
1618 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1620 std::vector<LiveInterval*> added;
1622 assert(li.weight != HUGE_VALF &&
1623 "attempt to spill already spilled interval!");
1625 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1629 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1633 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1634 while (RI != mri_->reg_end()) {
1635 MachineInstr* MI = &*RI;
// Collect every operand of MI that references li.reg.
1637 SmallVector<unsigned, 2> Indices;
1638 bool HasUse = false;
1639 bool HasDef = false;
1641 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1642 MachineOperand& mop = MI->getOperand(i);
1643 if (!mop.isRegister() || mop.getReg() != li.reg) continue;
1645 HasUse |= MI->getOperand(i).isUse();
1646 HasDef |= MI->getOperand(i).isDef();
1648 Indices.push_back(i);
// Prefer folding the stack slot access directly into the instruction;
// fall back to creating a reload/spill around it.
1651 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1652 Indices, true, slot, li.reg)) {
1653 unsigned NewVReg = mri_->createVirtualRegister(rc);
1655 vrm.assignVirt2StackSlot(NewVReg, slot);
1657 // create a new register for this spill
1658 LiveInterval &nI = getOrCreateInterval(NewVReg);
1660 // the spill weight is now infinity as it
1661 // cannot be spilled again
1662 nI.weight = HUGE_VALF;
1664 // Rewrite register operands to use the new vreg.
1665 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1666 E = Indices.end(); I != E; ++I) {
1667 MI->getOperand(*I).setReg(NewVReg);
// The new vreg's lifetime ends at this instruction's use.
1669 if (MI->getOperand(*I).isUse())
1670 MI->getOperand(*I).setIsKill(true);
1673 // Fill in the new live interval.
1674 unsigned index = getInstructionIndex(MI);
// Use range: from the reload point up to the use slot.
1676 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1677 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1680 vrm.addRestorePoint(NewVReg, MI);
// Def range: from the def slot up to the store (spill) point.
1683 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1684 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1687 vrm.addSpillPoint(NewVReg, true, MI);
1690 added.push_back(&nI);
1692 DOUT << "\t\t\t\tadded new interval: ";
// Account for the spill-slot traffic this instruction now generates.
1696 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1699 SSWeight += getSpillWeight(true, true, loopDepth);
1701 SSWeight += getSpillWeight(false, true, loopDepth);
1703 SSWeight += getSpillWeight(true, false, loopDepth);
// Restart iteration: the register's use list was mutated above.
1707 RI = mri_->reg_begin(li.reg);
1710 // Clients expect the new intervals to be returned in sorted order.
1711 std::sort(added.begin(), added.end(), LISorter());
// addIntervalsForSpills - Main spilling entry point: rewrite all defs / uses
// of li, attempt rematerialization and memory-operand folding, optionally
// split at basic block boundaries, and return the replacement intervals.
// NOTE(review): many statements in this body appear elided from this view.
1716 std::vector<LiveInterval*> LiveIntervals::
1717 addIntervalsForSpills(const LiveInterval &li,
1718 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
// Delegate to the simpler path when -fast-spill is enabled.
1721 if (EnableFastSpilling)
1722 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1724 assert(li.weight != HUGE_VALF &&
1725 "attempt to spill already spilled interval!");
1727 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1728 li.print(DOUT, tri_);
1731 // Spill slot weight.
1734 // Each bit specify whether it a spill is required in the MBB.
1735 BitVector SpillMBBs(mf_->getNumBlockIDs());
1736 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1737 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1738 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1739 DenseMap<unsigned,unsigned> MBBVRegsMap;
1740 std::vector<LiveInterval*> NewLIs;
1741 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization bookkeeping.
1743 unsigned NumValNums = li.getNumValNums();
1744 SmallVector<MachineInstr*, 4> ReMatDefs;
1745 ReMatDefs.resize(NumValNums, NULL);
1746 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1747 ReMatOrigDefs.resize(NumValNums, NULL);
1748 SmallVector<int, 4> ReMatIds;
1749 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1750 BitVector ReMatDelete(NumValNums);
1751 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1753 // Spilling a split live interval. It cannot be split any further. Also,
1754 // it's also guaranteed to be a single val# / range interval.
1755 if (vrm.getPreSplitReg(li.reg)) {
1756 vrm.setIsSplitFromReg(li.reg, 0);
1757 // Unset the split kill marker on the last use.
1758 unsigned KillIdx = vrm.getKillPoint(li.reg);
1760 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1761 assert(KillMI && "Last use disappeared?");
1762 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1763 assert(KillOp != -1 && "Last use disappeared?");
1764 KillMI->getOperand(KillOp).setIsKill(false);
1766 vrm.removeKillPoint(li.reg);
1767 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1768 Slot = vrm.getStackSlot(li.reg);
1769 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1770 MachineInstr *ReMatDefMI = DefIsReMat ?
1771 vrm.getReMaterializedMI(li.reg) : NULL;
1773 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1774 bool isLoad = isLoadSS ||
1775 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1776 bool IsFirstRange = true;
1777 for (LiveInterval::Ranges::const_iterator
1778 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1779 // If this is a split live interval with multiple ranges, it means there
1780 // are two-address instructions that re-defined the value. Only the
1781 // first def can be rematerialized!
1783 // Note ReMatOrigDefMI has already been deleted.
1784 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1785 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1786 false, vrm, rc, ReMatIds, loopInfo,
1787 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1788 MBBVRegsMap, NewLIs, SSWeight);
1790 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1791 Slot, 0, false, false, false,
1792 false, vrm, rc, ReMatIds, loopInfo,
1793 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1794 MBBVRegsMap, NewLIs, SSWeight);
1796 IsFirstRange = false;
1799 SSWeight = 0.0f; // Already accounted for when split.
1800 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Decide whether to split this interval at basic block boundaries.
1804 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1805 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1809 bool NeedStackSlot = false;
// First pass over value numbers: determine rematerializability and whether
// a stack slot is required at all.
1810 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1812 const VNInfo *VNI = *i;
1813 unsigned VN = VNI->id;
1814 unsigned DefIdx = VNI->def;
1816 continue; // Dead val#.
1817 // Is the def for the val# rematerializable?
1818 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1819 ? 0 : getInstructionFromIndex(DefIdx);
1821 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1822 // Remember how to remat the def of this val#.
1823 ReMatOrigDefs[VN] = ReMatDefMI;
1824 // Original def may be modified so we have to make a copy here.
1825 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1826 ClonedMIs.push_back(Clone);
1827 ReMatDefs[VN] = Clone;
1829 bool CanDelete = true;
1830 if (VNI->hasPHIKill) {
1831 // A kill is a phi node, not all of its uses can be rematerialized.
1832 // It must not be deleted.
1834 // Need a stack slot if there is any live range where uses cannot be
1836 NeedStackSlot = true;
1839 ReMatDelete.set(VN);
1841 // Need a stack slot if there is any live range where uses cannot be
1843 NeedStackSlot = true;
1847 // One stack slot per live interval.
1848 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1849 Slot = vrm.assignVirt2StackSlot(li.reg);
1851 // Create new intervals and rewrite defs and uses.
1852 for (LiveInterval::Ranges::const_iterator
1853 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1854 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1855 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1856 bool DefIsReMat = ReMatDefMI != NULL;
1857 bool CanDelete = ReMatDelete[I->valno->id];
1859 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1860 bool isLoad = isLoadSS ||
1861 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1862 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1863 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1864 CanDelete, vrm, rc, ReMatIds, loopInfo,
1865 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1866 MBBVRegsMap, NewLIs, SSWeight);
1869 // Insert spills / restores if we are splitting.
1871 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1875 SmallPtrSet<LiveInterval*, 4> AddedKill;
1876 SmallVector<unsigned, 2> Ops;
// Emit (or fold) the spill stores recorded per basic block.
1877 if (NeedStackSlot) {
1878 int Id = SpillMBBs.find_first();
1880 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1881 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1882 std::vector<SRInfo> &spills = SpillIdxes[Id];
1883 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1884 int index = spills[i].index;
1885 unsigned VReg = spills[i].vreg;
1886 LiveInterval &nI = getOrCreateInterval(VReg);
1887 bool isReMat = vrm.isReMaterialized(VReg);
1888 MachineInstr *MI = getInstructionFromIndex(index);
1889 bool CanFold = false;
1890 bool FoundUse = false;
1892 if (spills[i].canFold) {
1894 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1895 MachineOperand &MO = MI->getOperand(j);
1896 if (!MO.isRegister() || MO.getReg() != VReg)
1903 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1904 RestoreMBBs, RestoreIdxes))) {
1905 // MI has two-address uses of the same register. If the use
1906 // isn't the first and only use in the BB, then we can't fold
1907 // it. FIXME: Move this to rewriteInstructionsForSpills.
1914 // Fold the store into the def if possible.
1915 bool Folded = false;
1916 if (CanFold && !Ops.empty()) {
1917 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1920 // Also folded uses, do not issue a load.
1921 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1922 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1924 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1928 // Otherwise tell the spiller to issue a spill.
1930 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1931 bool isKill = LR->end == getStoreIndex(index);
1932 if (!MI->registerDefIsDead(nI.reg))
1933 // No need to spill a dead def.
1934 vrm.addSpillPoint(VReg, isKill, MI);
1936 AddedKill.insert(&nI);
1939 // Update spill slot weight.
1941 SSWeight += getSpillWeight(true, false, loopDepth);
1943 Id = SpillMBBs.find_next(Id);
// Emit (or fold) the reloads / rematerializations recorded per block.
1947 int Id = RestoreMBBs.find_first();
1949 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1950 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1952 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1953 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1954 int index = restores[i].index;
1957 unsigned VReg = restores[i].vreg;
1958 LiveInterval &nI = getOrCreateInterval(VReg);
1959 bool isReMat = vrm.isReMaterialized(VReg);
1960 MachineInstr *MI = getInstructionFromIndex(index);
1961 bool CanFold = false;
1963 if (restores[i].canFold) {
1965 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1966 MachineOperand &MO = MI->getOperand(j);
1967 if (!MO.isRegister() || MO.getReg() != VReg)
1971 // If this restore were to be folded, it would have been folded
1980 // Fold the load into the use if possible.
1981 bool Folded = false;
1982 if (CanFold && !Ops.empty()) {
1984 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1986 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1988 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1989 // If the rematerializable def is a load, also try to fold it.
1990 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1991 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1992 Ops, isLoadSS, LdSlot, VReg);
1993 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1995 // Re-matting an instruction with virtual register use. Add the
1996 // register as an implicit use on the use MI and update the register
1997 // interval's spill weight to HUGE_VALF to prevent it from being
1999 LiveInterval &ImpLi = getInterval(ImpUse);
2000 ImpLi.weight = HUGE_VALF;
2001 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2005 // If folding is not possible / failed, then tell the spiller to issue a
2006 // load / rematerialization for us.
2008 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2010 vrm.addRestorePoint(VReg, MI);
2012 // Update spill slot weight.
2014 SSWeight += getSpillWeight(false, true, loopDepth);
2016 Id = RestoreMBBs.find_next(Id);
2019 // Finalize intervals: add kills, finalize spill weights, and filter out
2021 std::vector<LiveInterval*> RetNewLIs;
2022 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2023 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's approximate size.
2025 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2026 if (!AddedKill.count(LI)) {
2027 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2028 unsigned LastUseIdx = getBaseIndex(LR->end);
2029 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2030 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2031 assert(UseIdx != -1);
2032 if (LastUse->getOperand(UseIdx).isImplicit() ||
2033 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2034 LastUse->getOperand(UseIdx).setIsKill();
2035 vrm.addKillPoint(LI->reg, LastUseIdx);
2038 RetNewLIs.push_back(LI);
2042 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2046 /// hasAllocatableSuperReg - Return true if the specified physical register has
2047 /// any super register that's allocatable.
2048 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk Reg's super-register list (a zero-terminated array from the target
// register info). A super register qualifies only if it is both allocatable
// on this target AND currently has a live interval computed for it.
2049 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2050 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): listing is truncated here (original lines 2051-2054 absent).
// Presumably the if-body is "return true;" with "return false;" after the
// loop, matching the doc comment -- confirm against the full source.
2055 /// getRepresentativeReg - Find the largest super register of the specified
2056 /// physical register.
2057 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2058 // Find the largest super-register that is allocatable.
// Start with Reg itself as the best candidate; a super register replaces it
// only if it is "maximal" -- i.e. it has no allocatable super register of its
// own (per hasAllocatableSuperReg) yet has a live interval here.
2059 unsigned BestReg = Reg;
2060 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2061 unsigned SuperReg = *AS;
2062 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): listing is truncated here (original lines 2063-2069 absent).
// Presumably BestReg is updated to SuperReg and the function ends with
// "return BestReg;" -- confirm against the full source.
2070 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2071 /// specified interval that conflicts with the specified physical register.
2072 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2073 unsigned PhysReg) const {
2074 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest allocatable)
// super register of PhysReg, so aliasing sub/super registers are covered by
// a single interval query.
2075 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (both uses and defs) of li.reg via the register
// use/def chain maintained by MachineRegisterInfo.
2076 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2077 E = mri_->reg_end(); I != E; ++I) {
2078 MachineOperand &O = I.getOperand();
2079 MachineInstr *MI = O.getParent();
2080 unsigned Index = getInstructionIndex(MI);
// A conflict is any operand whose instruction index falls inside the
// physical register's live interval.
2081 if (pli.liveAt(Index))
// NOTE(review): listing is truncated here (original lines 2082-2083 absent).
// Presumably "++NumConflicts;" -- confirm against the full source.
2084 return NumConflicts;
2087 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2088 /// around all defs and uses of the specified interval.
2089 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2090 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest allocatable) super register so that all
// aliasing sub-registers are covered by a single emergency spill.
2091 unsigned SpillReg = getRepresentativeReg(PhysReg);
2093 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2094 // If there are registers which alias PhysReg, but which are not a
2095 // sub-register of the chosen representative super register. Assert
2096 // since we can't handle it yet.
2097 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2098 tri_->isSuperRegister(*AS, SpillReg));
2100 LiveInterval &pli = getInterval(SpillReg);
// An instruction can reference li.reg through several operands; SeenMIs
// ensures each instruction is processed only once.
2101 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2102 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2103 E = mri_->reg_end(); I != E; ++I) {
2104 MachineOperand &O = I.getOperand();
2105 MachineInstr *MI = O.getParent();
2106 if (SeenMIs.count(MI))
// NOTE(review): listing is truncated here (original lines 2107-2108 absent).
// Presumably "continue;" followed by "SeenMIs.insert(MI);" -- confirm
// against the full source.
2109 unsigned Index = getInstructionIndex(MI);
// If the physical register is live across this def/use of li.reg, record an
// emergency spill point and carve the instruction's slot range
// [load-slot, store-slot] out of the physical interval...
2110 if (pli.liveAt(Index)) {
2111 vrm.addEmergencySpill(SpillReg, MI);
2112 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// ...and do the same for every sub-register interval that is live here, so
// the sub-register intervals stay consistent with the spilled super reg.
2113 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2114 if (!hasInterval(*AS))
// NOTE(review): original line 2115 absent -- presumably "continue;".
2116 LiveInterval &spli = getInterval(*AS);
2117 if (spli.liveAt(Index))
2118 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2124 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2125 MachineInstr* startInst) {
2126 LiveInterval& Interval = getOrCreateInterval(reg);
2127 VNInfo* VN = Interval.getNextValue(
2128 getInstructionIndex(startInst) + InstrSlots::DEF,
2129 startInst, getVNInfoAllocator());
2130 VN->hasPHIKill = true;
2131 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2132 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2133 getMBBEndIdx(startInst->getParent()) + 1, VN);
2134 Interval.addRange(LR);