//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds, "Number of loads/stores folded into instructions");
STATISTIC(numSplits, "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
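
  // Illustrative note (added; not in the original source): assuming the
  // InstrSlots layout from LiveIntervalAnalysis.h of this revision
  // (LOAD=0, USE=1, DEF=2, STORE=3, NUM=4), the numbering above lays out a
  // block with two single-def instructions roughly as:
  //
  //   index  0.. 3   empty slot at the start of the block
  //   index  4.. 7   first instruction  (4=load, 5=use, 6=def, 7=store)
  //   index  8..11   empty slot(s) after the first instruction
  //   index 12..15   second instruction
  //   index 16..19   empty slot(s) after the second instruction
  //
  // so, under these assumptions, getDefIndex(4) == 6 and
  // getUseIndex(12) == 13.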
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
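    // Illustrative note (added): with the assumed InstrSlots layout
    // (NUM == 4, USE == 1, DEF == 2), a normal def at base index 8 gets
    // defIndex 10, while an earlyclobber def is moved back to the use
    // slot, index 9, so it interferes with its instruction's own uses.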
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
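  // Illustrative note (added): with the assumed slot layout, a physreg
  // defined at base index 8 and killed at base index 16 gets the interval
  // [defSlot(8), useSlot(16)+1) == [10, 18).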
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
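  // Illustrative note (added): a live-in register killed by the
  // instruction at base index 4 gets the range [MBB start, useSlot(4)+1),
  // i.e. [0, 6) in the entry block under the assumed slot layout; if a def
  // is reached first, the register is treated as dead at that def instead.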
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and the
/// variable is live.
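///
/// Illustrative example (added; assuming the InstrSlots layout of this
/// revision): a vreg defined by the instruction at base index 4 and last
/// used by the instruction at base index 12 gets the live interval
/// [getDefIndex(4), getUseIndex(12)+1) == [6, 14).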
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;
  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
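///
/// Illustrative example (added; the opcode is hypothetical here): if the
/// remat candidate is
///   %reg1025 = ADD32ri %reg1024, 5
/// then every rematerialized copy of this ADD reads %reg1024, so this
/// helper returns %reg1024 for %reg1025's interval.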
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // heuristics.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
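///
/// Illustrative example (added; X86-style, for explanation only): a reload
/// followed by a use
///   %reg1024 = MOV32rm <fi#0>
///   %reg1026 = ADD32rr %reg1025, %reg1024<kill>
/// may be folded into a single memory-operand instruction
///   %reg1026 = ADD32rm %reg1025, <fi#0>
/// so no separate reload instruction is needed.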
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\t' << *MI;
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
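    // Added note: in this revision getSpillWeight (LiveIntervalAnalysis.h)
    // scales roughly as (isDef + isUse) * 10^loopDepth, so a use+def at
    // loop depth 2 contributes about 200 to the slot weight, while the
    // same access at depth 0 contributes 2.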
    if (!TrySplit)
      SSWeight += Weight;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;

          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);

          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }

  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse, HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  SSWeight = 0.0f;

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      nI.print(DOUT, tri_);
      DOUT << '\n';

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse && HasDef)
        SSWeight += getSpillWeight(true, true, loopDepth);
      else if (HasUse)
        SSWeight += getSpillWeight(false, true, loopDepth);
      else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS,
                                     DefIsReMat, false, vrm, rc, ReMatIds,
                                     loopInfo, SpillMBBs, SpillIdxes,
                                     RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds,
                                     loopInfo, SpillMBBs, SpillIdxes,
                                     RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f; // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
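
  // Non-split interval from here on. Decide whether to try splitting it at
  // basic block boundaries instead of spilling it everywhere; -split-limit
  // caps the total number of splits for debugging.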
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;

  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // The value is killed by a phi node; not all of its uses can be
        // rematerialized, so the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
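
  // At this point NeedStackSlot is true iff some value of this interval
  // cannot be rematerialized at all of its uses.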

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
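
  // When splitting, spills and restores were only recorded in SpillIdxes /
  // RestoreIdxes during rewriting. Walk the marked blocks now and decide,
  // for each location, whether the stack access can be folded into an
  // existing instruction or must be left for the spiller to insert.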

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
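
  // Now handle the recorded restore points: try to fold the reload (or the
  // rematerialized def, when it is itself a load) into the using
  // instruction; otherwise ask the spiller to issue the restore.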

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the spill weight by the interval's approximate size in
      // instructions (each instruction occupies InstrSlots::NUM slots).
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        // Mark the last use as a kill unless it is tied to a def (a
        // two-address redefinition keeps the value live).
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO)
              == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
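
// Note: on x86, for instance, getRepresentativeReg(AL) would typically
// return EAX, assuming EAX has a live interval and no allocatable
// super-register of its own.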

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
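
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value and a live range extending from the def slot of
/// that instruction to the end of its basic block, and return the range.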
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}