1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// DisableReMat: when set, suppresses rematerialization (default: off).
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
// SplitAtBB: allow interval splitting at basic-block boundaries (default: on).
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
// SplitLimit: cap on the number of splits; -1 means unlimited.
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
// EnableAggressiveRemat: apply target-independent remat heuristics to
// instructions the target does not mark trivially rematerializable.
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// EnableFastSpilling: use the fast spilling path (default: off).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits   , "Number of intervals split");
// Unique pass identifier and static registration under -liveintervals.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires (AliasAnalysis,
/// LiveVariables, phi elimination, two-address lowering) and the ones it
/// preserves, then defer to the MachineFunctionPass default.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
72 AU.addPreservedID(PHIEliminationID);
73 AU.addRequiredID(PHIEliminationID);
76 AU.addRequiredID(TwoAddressInstructionPassID);
77 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Free all per-function state: the LiveInterval objects in
/// r2iMap_, the VNInfo allocator's regions, and any cloned MachineInstrs.
80 void LiveIntervals::releaseMemory() {
81 // Free the live intervals themselves.
82 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
83 E = r2iMap_.end(); I != E; ++I)
91 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
92 VNInfoAllocator.Reset();
// Delete MIs that were cloned (e.g. for rematerialization bookkeeping).
93 while (!ClonedMIs.empty()) {
94 MachineInstr *MI = ClonedMIs.back();
96 mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign slot indices (InstrSlots::NUM slots per
/// instruction) to every MachineInstr and MachineBasicBlock, then remap any
/// existing live ranges / value numbers / kill indices from the previous
/// numbering (saved in OldI2MI / OldI2MBB) onto the new one.
100 void LiveIntervals::computeNumbering() {
// Snapshot the old index->MI and index->MBB maps so existing intervals can
// be translated after renumbering.
101 Index2MiMap OldI2MI = i2miMap_;
102 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
111 // Number MachineInstrs and MachineBasicBlocks.
112 // Initialize MBB indexes to a sentinel.
113 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
115 unsigned MIIndex = 0;
116 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
118 unsigned StartIdx = MIIndex;
120 // Insert an empty slot at the beginning of each block.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
124 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
126 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
127 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was a previous numbering, remap every interval onto the new one.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
// A LOAD offset marks a block-boundary index; map via the old MBB table.
159 if (offset == InstrSlots::LOAD) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
184 unsigned idx = index;
// Skip over indices whose instructions were erased; land on the next
// surviving instruction (offset only applies if it's the same slot).
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the function, register info, target info, and the
/// required AliasAnalysis/LiveVariables analyses, then computes the
/// allocatable-register set and the interval statistics.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record the number of intervals produced for -stats.
261 numIntervals += getNumIntervals();
267 /// print - Implement the dump method.
///
/// Prints every computed live interval followed by each basic block's
/// instructions annotated with their slot indices.
268 void LiveIntervals::print(std::ostream &O, const Module* ) const {
269 O << "********** INTERVALS **********\n";
270 for (const_iterator I = begin(), E = end(); I != E; ++I) {
271 I->second->print(O, tri_);
275 O << "********** MACHINEINSTRS **********\n";
276 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
277 mbbi != mbbe; ++mbbi) {
278 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
279 for (MachineBasicBlock::iterator mii = mbbi->begin(),
280 mie = mbbi->end(); mii != mie; ++mii) {
281 O << getInstructionIndex(mii) << '\t' << *mii;
286 /// conflictsWithPhysRegDef - Returns true if the specified register
287 /// is defined during the duration of the specified interval.
288 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
289 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by each live range of li.
290 for (LiveInterval::Ranges::const_iterator
291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
292 for (unsigned index = getBaseIndex(I->start),
293 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
294 index += InstrSlots::NUM) {
295 // skip deleted instructions
296 while (index != end && !getInstructionFromIndex(index))
297 index += InstrSlots::NUM;
298 if (index == end) break;
300 MachineInstr *MI = getInstructionFromIndex(index);
// Copies involving li.reg itself are not conflicts.
301 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
302 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
303 if (SrcReg == li.reg || DstReg == li.reg)
305 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
306 MachineOperand& mop = MI->getOperand(i);
309 unsigned PhysReg = mop.getReg();
310 if (PhysReg == 0 || PhysReg == li.reg)
// Translate a virtual operand to its assigned physreg via the VirtRegMap.
312 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
313 if (!vrm.hasPhys(PhysReg))
315 PhysReg = vrm.getPhys(PhysReg);
// Conflict if the operand's physreg overlaps the queried register.
317 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
326 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
327 /// it can check use as well.
328 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
329 unsigned Reg, bool CheckUse,
330 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
330 // (JoinedCopies holds coalesced copies that should be ignored.)
331 for (LiveInterval::Ranges::const_iterator
332 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
333 for (unsigned index = getBaseIndex(I->start),
334 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
335 index += InstrSlots::NUM) {
336 // Skip deleted instructions.
337 MachineInstr *MI = 0;
338 while (index != end) {
339 MI = getInstructionFromIndex(index);
342 index += InstrSlots::NUM;
344 if (index == end) break;
// Coalesced copies are going away; they never conflict.
346 if (JoinedCopies.count(MI))
348 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
349 MachineOperand& MO = MI->getOperand(i);
// Uses only count when the caller asked for them.
352 if (MO.isUse() && !CheckUse)
354 unsigned PhysReg = MO.getReg();
355 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
// Conflict if the operand references a sub-register of Reg.
357 if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Print a physical register by its target name, or a virtual
/// register as "%reg<N>", to stderr.
367 void LiveIntervals::printRegName(unsigned reg) const {
368 if (TargetRegisterInfo::isPhysicalRegister(reg))
369 cerr << tri_->getName(reg);
371 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Extend/create the live interval for a virtual
/// register defined by operand MO of instruction mi (at index MIIdx). Handles
/// three cases: the first def of the vreg, a two-address redefinition, and a
/// phi-elimination redefinition.
374 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
375 MachineBasicBlock::iterator mi,
376 unsigned MIIdx, MachineOperand& MO,
378 LiveInterval &interval) {
379 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
380 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
382 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
383 DOUT << "is a implicit_def\n";
387 // Virtual registers may be defined multiple times (due to phi
388 // elimination and 2-addr elimination). Much of what we do only has to be
389 // done once for the vreg. We use an empty interval to detect the first
390 // time we see a vreg.
391 if (interval.empty()) {
392 // Get the Idx of the defining instructions.
393 unsigned defIndex = getDefIndex(MIIdx);
394 // Earlyclobbers move back one.
395 if (MO.isEarlyClobber())
396 defIndex = getUseIndex(MIIdx);
// Record the copy instruction (if any) that defines this value, so the
// coalescer can later trace the copy source.
398 MachineInstr *CopyMI = NULL;
399 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
400 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
401 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
402 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
403 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
405 // Earlyclobbers move back one.
406 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
408 assert(ValNo->id == 0 && "First value in interval is not 0?");
410 // Loop over all of the blocks that the vreg is defined in. There are
411 // two cases we have to handle here. The most common case is a vreg
412 // whose lifetime is contained within a basic block. In this case there
413 // will be a single kill, in MBB, which comes after the definition.
414 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
415 // FIXME: what about dead vars?
417 if (vi.Kills[0] != mi)
418 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
420 killIdx = defIndex+1;
422 // If the kill happens after the definition, we have an intra-block
424 if (killIdx > defIndex) {
425 assert(vi.AliveBlocks.none() &&
426 "Shouldn't be alive across any blocks!");
427 LiveRange LR(defIndex, killIdx, ValNo);
428 interval.addRange(LR);
429 DOUT << " +" << LR << "\n";
430 interval.addKill(ValNo, killIdx);
435 // The other case we handle is when a virtual register lives to the end
436 // of the defining block, potentially live across some blocks, then is
437 // live into some number of blocks, but gets killed. Start by adding a
438 // range that goes from this definition to the end of the defining block.
439 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
440 DOUT << " +" << NewLR;
441 interval.addRange(NewLR);
443 // Iterate over all of the blocks that the variable is completely
444 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
446 for (int i = vi.AliveBlocks.find_first(); i != -1;
447 i = vi.AliveBlocks.find_next(i)) {
448 LiveRange LR(getMBBStartIdx(i),
449 getMBBEndIdx(i)+1, // MBB ends at -1.
451 interval.addRange(LR);
455 // Finally, this virtual register is live from the start of any killing
456 // block to the 'use' slot of the killing instruction.
457 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
458 MachineInstr *Kill = vi.Kills[i];
459 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
460 LiveRange LR(getMBBStartIdx(Kill->getParent()),
462 interval.addRange(LR);
463 interval.addKill(ValNo, killIdx);
468 // If this is the second time we see a virtual register definition, it
469 // must be due to phi elimination or two addr elimination. If this is
470 // the result of two address elimination, then the vreg is one of the
471 // def-and-use register operand.
472 if (mi->isRegTiedToUseOperand(MOIdx)) {
473 // If this is a two-address definition, then we have already processed
474 // the live range. The only problem is that we didn't realize there
475 // are actually two values in the live interval. Because of this we
476 // need to take the LiveRegion that defines this register and split it
478 assert(interval.containsOneValue());
479 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
480 unsigned RedefIndex = getDefIndex(MIIdx);
481 if (MO.isEarlyClobber())
482 RedefIndex = getUseIndex(MIIdx);
484 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
485 VNInfo *OldValNo = OldLR->valno;
487 // Delete the initial value, which should be short and continuous,
488 // because the 2-addr copy must be in the same MBB as the redef.
489 interval.removeRange(DefIndex, RedefIndex);
491 // Two-address vregs should always only be redefined once. This means
492 // that at this point, there should be exactly one value number in it.
493 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
495 // The new value number (#1) is defined by the instruction we claimed
497 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
500 // Value#0 is now defined by the 2-addr instruction.
501 OldValNo->def = RedefIndex;
503 if (MO.isEarlyClobber())
504 OldValNo->redefByEC = true;
506 // Add the new live interval which replaces the range for the input copy.
507 LiveRange LR(DefIndex, RedefIndex, ValNo);
508 DOUT << " replace range with " << LR;
509 interval.addRange(LR);
510 interval.addKill(ValNo, RedefIndex);
512 // If this redefinition is dead, we need to add a dummy unit live
513 // range covering the def slot.
515 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
518 interval.print(DOUT, tri_);
521 // Otherwise, this must be because of phi elimination. If this is the
522 // first redefinition of the vreg that we have seen, go back and change
523 // the live range in the PHI block to be a different value number.
524 if (interval.containsOneValue()) {
525 assert(vi.Kills.size() == 1 &&
526 "PHI elimination vreg should have one kill, the PHI itself!");
528 // Remove the old range that we now know has an incorrect number.
529 VNInfo *VNI = interval.getValNumInfo(0);
530 MachineInstr *Killer = vi.Kills[0];
531 unsigned Start = getMBBStartIdx(Killer->getParent());
532 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
533 DOUT << " Removing [" << Start << "," << End << "] from: ";
534 interval.print(DOUT, tri_); DOUT << "\n";
535 interval.removeRange(Start, End);
536 VNI->hasPHIKill = true;
537 DOUT << " RESULT: "; interval.print(DOUT, tri_);
539 // Replace the interval with one of a NEW value number. Note that this
540 // value number isn't actually defined by an instruction, weird huh? :)
541 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
542 DOUT << " replace range with " << LR;
543 interval.addRange(LR);
544 interval.addKill(LR.valno, End);
545 DOUT << " RESULT: "; interval.print(DOUT, tri_);
548 // In the case of PHI elimination, each variable definition is only
549 // live until the end of the block. We've already taken care of the
550 // rest of the live range.
551 unsigned defIndex = getDefIndex(MIIdx);
552 if (MO.isEarlyClobber())
553 defIndex = getUseIndex(MIIdx);
// Record the copy instruction (if any) defining this value, as above.
556 MachineInstr *CopyMI = NULL;
557 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
558 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
559 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
560 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
561 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
563 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
565 unsigned killIndex = getMBBEndIdx(mbb) + 1;
566 LiveRange LR(defIndex, killIndex, ValNo);
567 interval.addRange(LR);
568 interval.addKill(ValNo, killIndex);
569 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Compute the live range for a physical register
/// defined by operand MO at index MIIdx. Physregs live only within their
/// defining block, so the range runs from the def slot to the kill/redef (or
/// just past the def if dead), and is merged into any existing range.
577 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
578 MachineBasicBlock::iterator mi,
581 LiveInterval &interval,
582 MachineInstr *CopyMI) {
583 // A physical register cannot be live across basic block, so its
584 // lifetime must end somewhere in its defining basic block.
585 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
587 unsigned baseIndex = MIIdx;
588 unsigned start = getDefIndex(baseIndex);
589 // Earlyclobbers move back one.
590 if (MO.isEarlyClobber())
591 start = getUseIndex(MIIdx);
592 unsigned end = start;
594 // If it is not used after definition, it is considered dead at
595 // the instruction defining it. Hence its interval is:
596 // [defSlot(def), defSlot(def)+1)
603 // If it is not dead on definition, it must be killed by a
604 // subsequent instruction. Hence its interval is:
605 // [defSlot(def), useSlot(kill)+1)
606 baseIndex += InstrSlots::NUM;
// Scan forward through the block for the kill or the next redefinition.
607 while (++mi != MBB->end()) {
608 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
609 getInstructionFromIndex(baseIndex) == 0)
610 baseIndex += InstrSlots::NUM;
611 if (mi->killsRegister(interval.reg, tri_)) {
613 end = getUseIndex(baseIndex) + 1;
615 } else if (mi->modifiesRegister(interval.reg, tri_)) {
616 // Another instruction redefines the register before it is ever read.
617 // Then the register is essentially dead at the instruction that defines
618 // it. Hence its interval is:
619 // [defSlot(def), defSlot(def)+1)
625 baseIndex += InstrSlots::NUM;
628 // The only case we should have a dead physreg here without a killing or
629 // instruction where we know it's dead is if it is live-in to the function
630 // and never used. Another possible case is the implicit use of the
631 // physical register has been deleted by two-address pass.
635 assert(start < end && "did not find end of interval?");
637 // Already exists? Extend old live interval.
638 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
639 bool Extend = OldLR != interval.end();
640 VNInfo *ValNo = Extend
641 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
642 if (MO.isEarlyClobber() && Extend)
643 ValNo->redefByEC = true;
644 LiveRange LR(start, end, ValNo);
645 interval.addRange(LR);
646 interval.addKill(LR.valno, end);
647 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler; for allocatable physregs, also process each
/// sub-register that the def implicitly clobbers.
650 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
651 MachineBasicBlock::iterator MI,
655 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
656 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
657 getOrCreateInterval(MO.getReg()));
658 else if (allocatableRegs_[MO.getReg()]) {
// Identify copy-like instructions so the value number records its source.
659 MachineInstr *CopyMI = NULL;
660 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
661 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
662 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
663 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
664 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
666 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
667 getOrCreateInterval(MO.getReg()), CopyMI);
668 // Def of a register also defines its sub-registers.
669 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
670 // If MI also modifies the sub-register explicitly, avoid processing it
671 // more than once. Do not pass in TRI here so it checks for exact match.
672 if (!MI->modifiesRegister(*AS))
673 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
674 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB. The range starts at the block entry and ends at the first kill or
/// redefinition (or extends appropriately if the register is never used).
/// The resulting value number has the sentinel def ~0U (no defining instr).
678 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
680 LiveInterval &interval, bool isAlias) {
681 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
683 // Look for kills, if it reaches a def before it's killed, then it shouldn't
684 // be considered a livein.
685 MachineBasicBlock::iterator mi = MBB->begin();
686 unsigned baseIndex = MIIdx;
687 unsigned start = baseIndex;
// Skip slots with no instruction (empty slots inserted by the numbering).
688 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
689 getInstructionFromIndex(baseIndex) == 0)
690 baseIndex += InstrSlots::NUM;
691 unsigned end = baseIndex;
692 bool SeenDefUse = false;
694 while (mi != MBB->end()) {
695 if (mi->killsRegister(interval.reg, tri_)) {
697 end = getUseIndex(baseIndex) + 1;
700 } else if (mi->modifiesRegister(interval.reg, tri_)) {
701 // Another instruction redefines the register before it is ever read.
702 // Then the register is essentially dead at the instruction that defines
703 // it. Hence its interval is:
704 // [defSlot(def), defSlot(def)+1)
706 end = getDefIndex(start) + 1;
711 baseIndex += InstrSlots::NUM;
713 if (mi != MBB->end()) {
714 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
715 getInstructionFromIndex(baseIndex) == 0)
716 baseIndex += InstrSlots::NUM;
721 // Live-in register might not be used at all.
725 end = getDefIndex(MIIdx) + 1;
727 DOUT << " live through";
// Value number ~0U marks a def with no corresponding instruction.
732 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
733 interval.addRange(LR);
734 interval.addKill(LR.valno, end);
735 DOUT << " +" << LR << '\n';
738 /// computeIntervals - computes the live intervals for virtual
739 /// registers. for some ordering of the machine instructions [1,N] a
740 /// live interval is an interval [i, j) where 1 <= i <= j < N for
741 /// which a variable is live
742 void LiveIntervals::computeIntervals() {
744 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
745 << "********** Function: "
746 << ((Value*)mf_->getFunction())->getName() << '\n';
747 // Walk every block, handling live-ins first and then each def operand.
748 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
750 MachineBasicBlock *MBB = MBBI;
751 // Track the index of the current machine instr.
752 unsigned MIIndex = getMBBStartIdx(MBB);
753 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
755 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
757 // Create intervals for live-ins to this BB first.
758 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
759 LE = MBB->livein_end(); LI != LE; ++LI) {
760 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
761 // Multiple live-ins can alias the same register.
762 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
763 if (!hasInterval(*AS))
764 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
768 // Skip over empty initial indices.
769 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
770 getInstructionFromIndex(MIIndex) == 0)
771 MIIndex += InstrSlots::NUM;
773 for (; MI != miEnd; ++MI) {
774 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse; only register defs create intervals.
777 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
778 MachineOperand &MO = MI->getOperand(i);
779 // handle register defs - build intervals
780 if (MO.isReg() && MO.getReg() && MO.isDef()) {
781 handleRegisterDef(MBB, MI, MIIndex, MO, i);
785 // Skip over the empty slots after each instruction.
786 unsigned Slots = MI->getDesc().getNumDefs();
789 MIIndex += InstrSlots::NUM * Slots;
791 // Skip over empty indices.
792 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
793 getInstructionFromIndex(MIIndex) == 0)
794 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect the basic blocks whose start index falls inside
/// [Start, End), using a binary search over the sorted Idx2MBBMap.
799 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
800 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
801 std::vector<IdxMBBPair>::const_iterator I =
802 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
805 while (I != Idx2MBBMap.end()) {
808 MBBs.push_back(I->second);
/// findReachableMBBs - Like findLiveInMBBs, but also walks the successors of
/// each block found in [Start, End) via the sorted Idx2MBBMap.
815 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
816 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
817 std::vector<IdxMBBPair>::const_iterator I =
818 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
821 while (I != Idx2MBBMap.end()) {
824 MachineBasicBlock *MBB = I->second;
825 if (getMBBEndIdx(MBB) > End)
827 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
828 SE = MBB->succ_end(); SI != SE; ++SI)
/// createInterval - Allocate a new LiveInterval for the given register.
/// Physical registers get an infinite spill weight so they are never spilled.
836 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
837 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
838 return new LiveInterval(reg, Weight);
841 /// dupInterval - Duplicate a live interval. The caller is responsible for
842 /// managing the allocated memory.
843 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
844 LiveInterval *NewLI = createInterval(li->reg);
// Copy ranges and value numbers using this pass's VNInfo allocator.
845 NewLI->Copy(*li, getVNInfoAllocator());
849 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
850 /// copy field and returns the source register that defines it.
851 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
855 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
856 // If it's extracting out of a physical register, return the sub-register.
857 unsigned Reg = VNI->copy->getOperand(1).getReg();
858 if (TargetRegisterInfo::isPhysicalRegister(Reg))
859 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG take the inserted value as operand 2.
861 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
862 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
863 return VNI->copy->getOperand(2).getReg();
// Otherwise the copy must be a plain move; ask the target to decode it.
865 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
866 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
868 assert(0 && "Unrecognized copy instruction!");
872 //===----------------------------------------------------------------------===//
873 // Register allocator hooks.
876 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
877 /// allow one) virtual register operand, then its uses are implicitly using
878 /// the register. Returns the virtual register.
879 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
880 MachineInstr *MI) const {
// Scan the use operands, skipping li.reg itself.
882 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
883 MachineOperand &MO = MI->getOperand(i);
884 if (!MO.isReg() || !MO.isUse())
886 unsigned Reg = MO.getReg();
887 if (Reg == 0 || Reg == li.reg)
889 // FIXME: For now, only remat MI with at most one register operand.
891 "Can't rematerialize instruction with multiple register operand!");
900 /// isValNoAvailableAt - Return true if the val# of the specified interval
901 /// which reaches the given instruction also reaches the specified use index.
902 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
903 unsigned UseIdx) const {
904 unsigned Index = getInstructionIndex(MI);
905 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// Available iff UseIdx falls in a range carrying the same value number.
906 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
907 return UI != li.end() && UI->valno == ValNo;
910 /// isReMaterializable - Returns true if the definition MI of the specified
911 /// val# of the specified interval is re-materializable.
///
/// Checks, in order: IMPLICIT_DEF; loads from immutable stack slots; the
/// target's trivial-remat hook (with an aggressive target-independent
/// fallback behind -aggressive-remat); availability of any implicitly used
/// virtual register at all use points; and whether that register is about to
/// be spilled (SpillIs).
912 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
913 const VNInfo *ValNo, MachineInstr *MI,
914 SmallVectorImpl<LiveInterval*> &SpillIs,
919 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
923 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
924 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
925 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
926 // this but remember this is not safe to fold into a two-address
928 // This is a load from fixed stack slot. It can be rematerialized.
931 // If the target-specific rules don't identify an instruction as
932 // being trivially rematerializable, use some target-independent
934 if (!MI->getDesc().isRematerializable() ||
935 !tii_->isTriviallyReMaterializable(MI)) {
936 if (!EnableAggressiveRemat)
939 // If the instruction accesses memory but the memoperands have been lost,
940 // we can't analyze it.
941 const TargetInstrDesc &TID = MI->getDesc();
942 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
945 // Avoid instructions obviously unsafe for remat.
946 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
949 // If the instruction accesses memory and the memory could be non-constant,
950 // assume the instruction is not rematerializable.
951 for (std::list<MachineMemOperand>::const_iterator
952 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
953 const MachineMemOperand &MMO = *I;
954 if (MMO.isVolatile() || MMO.isStore())
956 const Value *V = MMO.getValue();
// Pseudo source values (e.g. constant pool) know whether they're constant;
// everything else is checked through alias analysis.
959 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
960 if (!PSV->isConstant(mf_->getFrameInfo()))
962 } else if (!aa_->pointsToConstantMemory(V))
966 // If any of the registers accessed are non-constant, conservatively assume
967 // the instruction is not rematerializable.
969 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
970 const MachineOperand &MO = MI->getOperand(i);
972 unsigned Reg = MO.getReg();
975 if (TargetRegisterInfo::isPhysicalRegister(Reg))
978 // Only allow one def, and that in the first operand.
979 if (MO.isDef() != (i == 0))
982 // Only allow constant-valued registers.
983 bool IsLiveIn = mri_->isLiveIn(Reg);
984 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
987 // For the def, it should be the only def of that register.
988 if (MO.isDef() && (next(I) != E || IsLiveIn))
992 // Only allow one use other register use, as that's all the
993 // remat mechanisms support currently.
997 else if (Reg != ImpUse)
1000 // For the use, there should be only one associated def.
1001 if (I != E && (next(I) != E || IsLiveIn))
// If the def has an implicit virtual-register use, that register's value
// must reach every use of li, or remat would read a stale value.
1008 unsigned ImpUse = getReMatImplicitUse(li, MI);
1010 const LiveInterval &ImpLi = getInterval(ImpUse);
1011 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1012 re = mri_->use_end(); ri != re; ++ri) {
1013 MachineInstr *UseMI = &*ri;
1014 unsigned UseIdx = getInstructionIndex(UseMI);
1015 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1017 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1021 // If a register operand of the re-materialized instruction is going to
1022 // be spilled next, then it's not legal to re-materialize this instruction.
1023 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1024 if (ImpUse == SpillIs[i]->reg)
1030 /// isReMaterializable - Returns true if the definition MI of the specified
1031 /// val# of the specified interval is re-materializable.
1032 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1033 const VNInfo *ValNo, MachineInstr *MI) {
1034 SmallVector<LiveInterval*, 4> Dummy1;
1036 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1039 /// isReMaterializable - Returns true if every definition of MI of every
1040 /// val# of the specified interval is re-materializable.
1041 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1042 SmallVectorImpl<LiveInterval*> &SpillIs,
// NOTE(review): the signature appears truncated in this excerpt -- the
// 'isLoad' out-parameter updated below is presumably declared on an
// elided line; confirm against the full source.
1045 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1047 const VNInfo *VNI = *i;
1048 unsigned DefIdx = VNI->def;
// Val#s without a real defining instruction are dead; skip them.
1050 continue; // Dead val#.
1051 // Is the def for the val# rematerializable?
1054 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
1055 bool DefIsLoad = false;
// Delegate to the per-val# overload; a single non-remattable def makes
// the whole interval non-remattable (failure path elided here).
1057 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any remattable def is a load.
1059 isLoad |= DefIsLoad;
1064 /// FilterFoldedOps - Filter out two-address use operands. Return
1065 /// true if it finds any issue with the operands that ought to prevent
1067 static bool FilterFoldedOps(MachineInstr *MI,
1068 SmallVector<unsigned, 2> &Ops,
// NOTE(review): an 'unsigned &MRInfo' out-parameter is written below but
// its declaration line is elided from this excerpt -- verify against the
// full source.
1070 SmallVector<unsigned, 2> &FoldOps) {
// Classify each candidate operand index as a mod and/or ref of the
// register being spilled, dropping operands that must not be folded.
1072 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1073 unsigned OpIdx = Ops[i];
1074 MachineOperand &MO = MI->getOperand(OpIdx);
1075 // FIXME: fold subreg use.
// A def marks the virtual register as modified.
1079 MRInfo |= (unsigned)VirtRegMap::isMod;
1081 // Filter out two-address use operand(s).
1082 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use both reads and writes the register; record mod+ref and do
// not hand this operand to the folder.
1083 MRInfo = VirtRegMap::isModRef;
// A plain use marks the register as referenced.
1086 MRInfo |= (unsigned)VirtRegMap::isRef;
// Operand survived filtering; it is a legal folding candidate.
1088 FoldOps.push_back(OpIdx);
1094 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1095 /// slot / to reg or any rematerialized load into ith operand of specified
1096 /// MI. If it is successul, MI is updated with the newly created MI and
1098 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1099 VirtRegMap &vrm, MachineInstr *DefMI,
// NOTE(review): an 'InstrIdx' parameter is used below (index bookkeeping)
// but its declaration line is elided from this excerpt.
1101 SmallVector<unsigned, 2> &Ops,
1102 bool isSS, int Slot, unsigned Reg) {
1103 // If it is an implicit def instruction, just delete it.
1104 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
// Drop the instruction from both index maps before erasing it so no
// stale mapping survives.
1105 RemoveMachineInstrFromMaps(MI);
1106 vrm.RemoveMachineInstrFromMaps(MI);
1107 MI->eraseFromParent();
1112 // Filter the list of operand indexes that are to be folded. Abort if
1113 // any operand will prevent folding.
1114 unsigned MRInfo = 0;
1115 SmallVector<unsigned, 2> FoldOps;
1116 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1119 // The only time it's safe to fold into a two address instruction is when
1120 // it's folding reload and spill from / into a spill stack slot.
1121 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (spill/reload case) or
// the rematerialized defining instruction into MI.
1124 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1125 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1127 // Remember this instruction uses the spill slot.
1128 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1130 // Attempt to fold the memory reference into the instruction. If
1131 // we can do this, we don't need to insert spill code.
1132 MachineBasicBlock &MBB = *MI->getParent();
1133 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1134 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Move all spill / restore / emergency-spill points recorded against the
// old instruction onto the folded replacement.
1135 vrm.transferSpillPts(MI, fmi);
1136 vrm.transferRestorePts(MI, fmi);
1137 vrm.transferEmergencySpills(MI, fmi);
// Rebind the instruction index to the new (folded) instruction.
1139 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1140 mi2iMap_[fmi] = InstrIdx;
// Splice the folded instruction in place of the original and hand it
// back to the caller through the MI reference parameter.
1141 MI = MBB.insert(MBB.erase(MI), fmi);
1148 /// canFoldMemoryOperand - Returns true if the specified load / store
1149 /// folding is possible.
1150 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1151 SmallVector<unsigned, 2> &Ops,
// NOTE(review): a 'ReMat' parameter is tested below but its declaration
// line is elided from this excerpt.
1153 // Filter the list of operand indexes that are to be folded. Abort if
1154 // any operand will prevent folding.
1155 unsigned MRInfo = 0;
1156 SmallVector<unsigned, 2> FoldOps;
1157 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1160 // It's only legal to remat for a use, not a def.
1161 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
1164 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of li lies inside a
// single machine basic block (checked via the sorted Idx2MBBMap).
1167 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1168 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1169 for (LiveInterval::Ranges::const_iterator
1170 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Binary-search for the block whose end index is the first >= range start.
1171 std::vector<IdxMBBPair>::const_iterator II =
1172 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1173 if (II == Idx2MBBMap.end())
// Range extends past the block's boundary index.
1175 if (I->end > II->first) // crossing a MBB.
// Collect the block; more than one distinct block means the interval
// spans multiple MBBs (failure path elided in this excerpt).
1177 MBBs.insert(II->second);
1178 if (MBBs.size() > 1)
1184 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1185 /// interval on to-be re-materialized operands of MI) with new register.
1186 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1187 MachineInstr *MI, unsigned NewVReg,
// NOTE(review): the trailing 'VirtRegMap &vrm' parameter line appears to
// be elided from this excerpt ('vrm' is used below).
1189 // There is an implicit use. That means one of the other operand is
1190 // being remat'ed and the remat'ed instruction has li.reg as an
1191 // use operand. Make sure we rewrite that as well.
1192 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1193 MachineOperand &MO = MI->getOperand(i);
1196 unsigned Reg = MO.getReg();
// Only virtual registers can have been rematerialized.
1197 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1199 if (!vrm.isReMaterialized(Reg))
// Patch the cached remat instruction's use of li.reg to the new vreg.
1201 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1202 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1204 UseMO->setReg(NewVReg);
1208 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1209 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites the operands of one instruction MI that reference li.reg to a
// (possibly fresh) NewVReg, recording spill/remat bookkeeping in vrm and
// creating/extending the corresponding new live interval.  Returns
// whether the memory access could still be folded later.
1210 bool LiveIntervals::
1211 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1212 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1213 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1214 unsigned Slot, int LdSlot,
1215 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1217 const TargetRegisterClass* rc,
1218 SmallVector<int, 4> &ReMatIds,
1219 const MachineLoopInfo *loopInfo,
1220 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1221 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1222 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1223 MachineBasicBlock *MBB = MI->getParent();
1224 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1225 bool CanFold = false;
// Scan every operand of MI; only those naming li.reg are rewritten.
1227 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1228 MachineOperand& mop = MI->getOperand(i);
1231 unsigned Reg = mop.getReg();
1232 unsigned RegI = Reg;
1233 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1238 bool TryFold = !DefIsReMat;
1239 bool FoldSS = true; // Default behavior unless it's a remat.
1240 int FoldSlot = Slot;
1242 // If this is the rematerializable definition MI itself and
1243 // all of its uses are rematerialized, simply delete it.
1244 if (MI == ReMatOrigDefMI && CanDelete) {
1245 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1247 RemoveMachineInstrFromMaps(MI);
1248 vrm.RemoveMachineInstrFromMaps(MI);
1249 MI->eraseFromParent();
1253 // If def for this use can't be rematerialized, then try folding.
1254 // If def is rematerializable and it's a load, also try folding.
1255 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1257 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1263 // Scan all of the operands of this instruction rewriting operands
1264 // to use NewVReg instead of li.reg as appropriate. We do this for
1267 // 1. If the instr reads the same spilled vreg multiple times, we
1268 // want to reuse the NewVReg.
1269 // 2. If the instr is a two-addr instruction, we are required to
1270 // keep the src/dst regs pinned.
1272 // Keep track of whether we replace a use and/or def so that we can
1273 // create the spill interval with the appropriate range.
1275 HasUse = mop.isUse();
1276 HasDef = mop.isDef();
1277 SmallVector<unsigned, 2> Ops;
// Collect all later operands of MI that reference the same register so
// they can share NewVReg.
1279 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1280 const MachineOperand &MOj = MI->getOperand(j);
1283 unsigned RegJ = MOj.getReg();
1284 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1288 HasUse |= MOj.isUse();
1289 HasDef |= MOj.isDef();
1293 if (HasUse && !li.liveAt(getUseIndex(index)))
1294 // Must be defined by an implicit def. It should not be spilled. Note,
1295 // this is for correctness reason. e.g.
1296 // 8   %reg1024<def> = IMPLICIT_DEF
1297 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1298 // The live range [12, 14) are not part of the r1024 live interval since
1299 // it's defined by an implicit def. It will not conflicts with live
1300 // interval of r1025. Now suppose both registers are spilled, you can
1301 // easily see a situation where both registers are reloaded before
1302 // the INSERT_SUBREG and both target registers that would overlap.
1305 // Update stack slot spill weight if we are splitting.
1306 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1310 // Create a new virtual register for the spill interval.
1311 // Create the new register now so we can map the fold instruction
1312 // to the new register so when it is unfolded we get the correct
1314 bool CreatedNewVReg = false;
1316 NewVReg = mri_->createVirtualRegister(rc);
1318 CreatedNewVReg = true;
1324 // Do not fold load / store here if we are splitting. We'll find an
1325 // optimal point to insert a load / store later.
1327 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1328 Ops, FoldSS, FoldSlot, NewVReg)) {
1329 // Folding the load/store can completely change the instruction in
1330 // unpredictable ways, rescan it from the beginning.
1333 // We need to give the new vreg the same stack slot as the
1334 // spilled interval.
1335 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding erased/replaced MI such that it is no longer in the index
// maps, restart the operand scan on the replacement instruction.
1341 if (isNotInMIMap(MI)) {
1345 goto RestartInstruction;
1348 // We'll try to fold it later if it's profitable.
1349 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all collected same-register operands) to
// the new virtual register.
1353 mop.setReg(NewVReg);
1354 if (mop.isImplicit())
1355 rewriteImplicitOps(li, MI, NewVReg, vrm);
1357 // Reuse NewVReg for other reads.
1358 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1359 MachineOperand &mopj = MI->getOperand(Ops[j]);
1360 mopj.setReg(NewVReg);
1361 if (mopj.isImplicit())
1362 rewriteImplicitOps(li, MI, NewVReg, vrm);
1365 if (CreatedNewVReg) {
// Record remat info (and per-val# remat id) for the fresh register.
1367 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1368 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1369 // Each valnum may have its own remat id.
1370 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1372 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1374 if (!CanDelete || (HasUse && HasDef)) {
1375 // If this is a two-addr instruction then its use operands are
1376 // rematerializable but its def is not. It should be assigned a
1378 vrm.assignVirt2StackSlot(NewVReg, Slot);
1381 vrm.assignVirt2StackSlot(NewVReg, Slot);
1383 } else if (HasUse && HasDef &&
1384 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1385 // If this interval hasn't been assigned a stack slot (because earlier
1386 // def is a deleted remat def), do it now.
1387 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1388 vrm.assignVirt2StackSlot(NewVReg, Slot);
1391 // Re-matting an instruction with virtual register use. Add the
1392 // register as an implicit use on the use MI.
1393 if (DefIsReMat && ImpUse)
1394 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1396 // Create a new register interval for this spill / remat.
1397 LiveInterval &nI = getOrCreateInterval(NewVReg);
1398 if (CreatedNewVReg) {
1399 NewLIs.push_back(&nI);
1400 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1402 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval ranges covering this use and/or def.
1406 if (CreatedNewVReg) {
1407 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1408 nI.getNextValue(~0U, 0, VNInfoAllocator));
1412 // Extend the split live interval to this def / use.
1413 unsigned End = getUseIndex(index)+1;
1414 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1415 nI.getValNumInfo(nI.getNumValNums()-1));
1421 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1422 nI.getNextValue(~0U, 0, VNInfoAllocator));
1427 DOUT << "\t\t\t\tAdded new interval: ";
1428 nI.print(DOUT, tri_);
1433 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1435 MachineBasicBlock *MBB, unsigned Idx) const {
1436 unsigned End = getMBBEndIdx(MBB);
1437 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1438 unsigned KillIdx = VNI->kills[j];
1439 if (KillIdx > Idx && KillIdx < End)
1445 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1446 /// during spilling.
// NOTE(review): the member declarations (Index, MI, HasUse, HasDef) that
// the constructor initializes appear to be on elided lines; confirm
// against the full source.
1448 struct RewriteInfo {
// i: instruction index of mi; u/d: whether this occurrence of the
// spilled register is a use and/or a def.
1453 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1454 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering of RewriteInfo records by instruction index, used
// to sort them into program order before rewriting.
1457 struct RewriteInfoCompare {
1458 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1459 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of li.reg within
// the live range I: collect the occurrences, sort them into program
// order, rewrite each instruction via rewriteInstructionForSpills, and
// record per-MBB spill points (SpillMBBs/SpillIdxes) and restore points
// (RestoreMBBs/RestoreIdxes) for the spiller.
1464 void LiveIntervals::
1465 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1466 LiveInterval::Ranges::const_iterator &I,
1467 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1468 unsigned Slot, int LdSlot,
1469 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1471 const TargetRegisterClass* rc,
1472 SmallVector<int, 4> &ReMatIds,
1473 const MachineLoopInfo *loopInfo,
1474 BitVector &SpillMBBs,
1475 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1476 BitVector &RestoreMBBs,
1477 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1478 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1479 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1480 bool AllCanFold = true;
1481 unsigned NewVReg = 0;
1482 unsigned start = getBaseIndex(I->start);
1483 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1485 // First collect all the def / use in this live range that will be rewritten.
1486 // Make sure they are sorted according to instruction index.
1487 std::vector<RewriteInfo> RewriteMIs;
1488 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1489 re = mri_->reg_end(); ri != re; ) {
1490 MachineInstr *MI = &*ri;
1491 MachineOperand &O = ri.getOperand();
1493 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1494 unsigned index = getInstructionIndex(MI);
// Occurrences outside this live range belong to other calls.
1495 if (index < start || index >= end)
1497 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1498 // Must be defined by an implicit def. It should not be spilled. Note,
1499 // this is for correctness reason. e.g.
1500 // 8   %reg1024<def> = IMPLICIT_DEF
1501 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1502 // The live range [12, 14) are not part of the r1024 live interval since
1503 // it's defined by an implicit def. It will not conflicts with live
1504 // interval of r1025. Now suppose both registers are spilled, you can
1505 // easily see a situation where both registers are reloaded before
1506 // the INSERT_SUBREG and both target registers that would overlap.
1508 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1510 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1512 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1513 // Now rewrite the defs and uses.
1514 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1515 RewriteInfo &rwi = RewriteMIs[i];
1517 unsigned index = rwi.Index;
1518 bool MIHasUse = rwi.HasUse;
1519 bool MIHasDef = rwi.HasDef;
1520 MachineInstr *MI = rwi.MI;
1521 // If MI def and/or use the same register multiple times, then there
1522 // are multiple entries.
// Coalesce all records for the same instruction into one rewrite.
1523 unsigned NumUses = MIHasUse;
1524 while (i != e && RewriteMIs[i].MI == MI) {
1525 assert(RewriteMIs[i].Index == index);
1526 bool isUse = RewriteMIs[i].HasUse;
1527 if (isUse) ++NumUses;
1529 MIHasDef |= RewriteMIs[i].HasDef;
1532 MachineBasicBlock *MBB = MI->getParent();
1534 if (ImpUse && MI != ReMatDefMI) {
1535 // Re-matting an instruction with virtual register use. Update the
1536 // register interval's spill weight to HUGE_VALF to prevent it from
1538 LiveInterval &ImpLi = getInterval(ImpUse);
1539 ImpLi.weight = HUGE_VALF;
1542 unsigned MBBId = MBB->getNumber();
1543 unsigned ThisVReg = 0;
// When splitting, reuse the replacement vreg already chosen for this MBB.
1545 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1546 if (NVI != MBBVRegsMap.end()) {
1547 ThisVReg = NVI->second;
1554 // It's better to start a new interval to avoid artifically
1555 // extend the new interval.
1556 if (MIHasDef && !MIHasUse) {
1557 MBBVRegsMap.erase(MBB->getNumber());
1563 bool IsNew = ThisVReg == 0;
1565 // This ends the previous live interval. If all of its def / use
1566 // can be folded, give it a low spill weight.
1567 if (NewVReg && TrySplit && AllCanFold) {
1568 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef/HasUse report what was replaced.
1575 bool HasDef = false;
1576 bool HasUse = false;
1577 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1578 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1579 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1580 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1581 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1582 if (!HasDef && !HasUse)
1585 AllCanFold &= CanFold;
1587 // Update weight of spill interval.
1588 LiveInterval &nI = getOrCreateInterval(NewVReg);
1590 // The spill weight is now infinity as it cannot be spilled again.
1591 nI.weight = HUGE_VALF;
1595 // Keep track of the last def and first use in each MBB.
1597 if (MI != ReMatOrigDefMI || !CanDelete) {
1598 bool HasKill = false;
1600 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1602 // If this is a two-address code, then this index starts a new VNInfo.
1603 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1605 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record / merge this def as a potential spill point for its MBB.
1607 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1608 SpillIdxes.find(MBBId);
1610 if (SII == SpillIdxes.end()) {
1611 std::vector<SRInfo> S;
1612 S.push_back(SRInfo(index, NewVReg, true));
1613 SpillIdxes.insert(std::make_pair(MBBId, S));
1614 } else if (SII->second.back().vreg != NewVReg) {
1615 SII->second.push_back(SRInfo(index, NewVReg, true));
1616 } else if ((int)index > SII->second.back().index) {
1617 // If there is an earlier def and this is a two-address
1618 // instruction, then it's not possible to fold the store (which
1619 // would also fold the load).
1620 SRInfo &Info = SII->second.back();
1622 Info.canFold = !HasUse;
1624 SpillMBBs.set(MBBId);
1625 } else if (SII != SpillIdxes.end() &&
1626 SII->second.back().vreg == NewVReg &&
1627 (int)index > SII->second.back().index) {
1628 // There is an earlier def that's not killed (must be two-address).
1629 // The spill is no longer needed.
1630 SII->second.pop_back();
1631 if (SII->second.empty()) {
1632 SpillIdxes.erase(MBBId);
1633 SpillMBBs.reset(MBBId);
// Record / adjust restore info for uses.
1640 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1641 SpillIdxes.find(MBBId);
1642 if (SII != SpillIdxes.end() &&
1643 SII->second.back().vreg == NewVReg &&
1644 (int)index > SII->second.back().index)
1645 // Use(s) following the last def, it's not safe to fold the spill.
1646 SII->second.back().canFold = false;
1647 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1648 RestoreIdxes.find(MBBId);
1649 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1650 // If we are splitting live intervals, only fold if it's the first
1651 // use and there isn't another use later in the MBB.
1652 RII->second.back().canFold = false;
1654 // Only need a reload if there isn't an earlier def / use.
1655 if (RII == RestoreIdxes.end()) {
1656 std::vector<SRInfo> Infos;
1657 Infos.push_back(SRInfo(index, NewVReg, true));
1658 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1660 RII->second.push_back(SRInfo(index, NewVReg, true));
1662 RestoreMBBs.set(MBBId);
1666 // Update spill weight.
1667 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1668 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1671 if (NewVReg && TrySplit && AllCanFold) {
1672 // If all of its def / use can be folded, give it a low spill weight.
1673 LiveInterval &nI = getOrCreateInterval(NewVReg);
1678 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1679 BitVector &RestoreMBBs,
1680 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1681 if (!RestoreMBBs[Id])
1683 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1684 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1685 if (Restores[i].index == index &&
1686 Restores[i].vreg == vr &&
1687 Restores[i].canFold)
1692 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1693 BitVector &RestoreMBBs,
1694 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1695 if (!RestoreMBBs[Id])
1697 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1698 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1699 if (Restores[i].index == index && Restores[i].vreg)
1700 Restores[i].index = -1;
1703 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1704 /// spilled and create empty intervals for their uses.
1706 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1707 const TargetRegisterClass* rc,
1708 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining occurrence of li.reg; at this point only
// implicit-def defs and their uses should be left un-rewritten.
1709 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1710 re = mri_->reg_end(); ri != re; ) {
1711 MachineOperand &O = ri.getOperand();
1712 MachineInstr *MI = &*ri;
// Any surviving def must be an IMPLICIT_DEF; erase it entirely.
1715 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1716 "Register def was not rewritten?");
1717 RemoveMachineInstrFromMaps(MI);
1718 vrm.RemoveMachineInstrFromMaps(MI);
1719 MI->eraseFromParent();
1721 // This must be an use of an implicit_def so it's not part of the live
1722 // interval. Create a new empty live interval for it.
1723 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1724 unsigned NewVReg = mri_->createVirtualRegister(rc);
1726 vrm.setIsImplicitlyDefined(NewVReg);
1727 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg to the
// fresh implicitly-defined register.
1728 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1729 MachineOperand &MO = MI->getOperand(i);
1730 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpillsFast - Simple spilling path (enabled by the
// -fast-spill option): assign li one stack slot, then for every
// instruction touching li.reg either fold the memory access or rewrite
// it to a fresh short-lived vreg with a tiny reload/spill interval.
1737 std::vector<LiveInterval*> LiveIntervals::
1738 addIntervalsForSpillsFast(const LiveInterval &li,
1739 const MachineLoopInfo *loopInfo,
1740 VirtRegMap &vrm, float& SSWeight) {
1741 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1743 std::vector<LiveInterval*> added;
1745 assert(li.weight != HUGE_VALF &&
1746 "attempt to spill already spilled interval!");
1748 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1752 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Iterate occurrence-by-occurrence; the iterator is re-fetched after
// each rewrite because operands are mutated as we go.
1756 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1757 while (RI != mri_->reg_end()) {
1758 MachineInstr* MI = &*RI;
1760 SmallVector<unsigned, 2> Indices;
1761 bool HasUse = false;
1762 bool HasDef = false;
// Collect all operand indices of MI that reference li.reg.
1764 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1765 MachineOperand& mop = MI->getOperand(i);
1766 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1768 HasUse |= MI->getOperand(i).isUse();
1769 HasDef |= MI->getOperand(i).isDef();
1771 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; only fall back to a
// reload/spill through a new vreg when folding fails.
1774 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1775 Indices, true, slot, li.reg)) {
1776 unsigned NewVReg = mri_->createVirtualRegister(rc);
1778 vrm.assignVirt2StackSlot(NewVReg, slot);
1780 // create a new register for this spill
1781 LiveInterval &nI = getOrCreateInterval(NewVReg);
1783 // the spill weight is now infinity as it
1784 // cannot be spilled again
1785 nI.weight = HUGE_VALF;
1787 // Rewrite register operands to use the new vreg.
1788 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1789 E = Indices.end(); I != E; ++I) {
1790 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction for uses.
1792 if (MI->getOperand(*I).isUse())
1793 MI->getOperand(*I).setIsKill(true);
1796 // Fill in the new live interval.
1797 unsigned index = getInstructionIndex(MI);
// Reload range: from the load slot up to the use slot of MI.
1799 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1800 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1803 vrm.addRestorePoint(NewVReg, MI);
// Spill range: from the def slot up to the store slot of MI.
1806 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1807 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1810 vrm.addSpillPoint(NewVReg, true, MI);
1813 added.push_back(&nI);
1815 DOUT << "\t\t\t\tadded new interval: ";
// Accumulate the stack-slot weight according to how MI touches the reg.
1819 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1822 SSWeight += getSpillWeight(true, true, loopDepth);
1824 SSWeight += getSpillWeight(false, true, loopDepth);
1826 SSWeight += getSpillWeight(true, false, loopDepth);
// Operands changed above, so restart iteration from the first remaining
// occurrence of li.reg.
1830 RI = mri_->reg_begin(li.reg);
1836 std::vector<LiveInterval*> LiveIntervals::
1837 addIntervalsForSpills(const LiveInterval &li,
1838 SmallVectorImpl<LiveInterval*> &SpillIs,
1839 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1842 if (EnableFastSpilling)
1843 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1845 assert(li.weight != HUGE_VALF &&
1846 "attempt to spill already spilled interval!");
1848 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1849 li.print(DOUT, tri_);
1852 // Spill slot weight.
1855 // Each bit specify whether a spill is required in the MBB.
1856 BitVector SpillMBBs(mf_->getNumBlockIDs());
1857 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1858 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1859 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1860 DenseMap<unsigned,unsigned> MBBVRegsMap;
1861 std::vector<LiveInterval*> NewLIs;
1862 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1864 unsigned NumValNums = li.getNumValNums();
1865 SmallVector<MachineInstr*, 4> ReMatDefs;
1866 ReMatDefs.resize(NumValNums, NULL);
1867 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1868 ReMatOrigDefs.resize(NumValNums, NULL);
1869 SmallVector<int, 4> ReMatIds;
1870 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1871 BitVector ReMatDelete(NumValNums);
1872 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1874 // Spilling a split live interval. It cannot be split any further. Also,
1875 // it's also guaranteed to be a single val# / range interval.
1876 if (vrm.getPreSplitReg(li.reg)) {
1877 vrm.setIsSplitFromReg(li.reg, 0);
1878 // Unset the split kill marker on the last use.
1879 unsigned KillIdx = vrm.getKillPoint(li.reg);
1881 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1882 assert(KillMI && "Last use disappeared?");
1883 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1884 assert(KillOp != -1 && "Last use disappeared?");
1885 KillMI->getOperand(KillOp).setIsKill(false);
1887 vrm.removeKillPoint(li.reg);
1888 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1889 Slot = vrm.getStackSlot(li.reg);
1890 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1891 MachineInstr *ReMatDefMI = DefIsReMat ?
1892 vrm.getReMaterializedMI(li.reg) : NULL;
1894 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1895 bool isLoad = isLoadSS ||
1896 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1897 bool IsFirstRange = true;
1898 for (LiveInterval::Ranges::const_iterator
1899 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1900 // If this is a split live interval with multiple ranges, it means there
1901 // are two-address instructions that re-defined the value. Only the
1902 // first def can be rematerialized!
1904 // Note ReMatOrigDefMI has already been deleted.
1905 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1906 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1907 false, vrm, rc, ReMatIds, loopInfo,
1908 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1909 MBBVRegsMap, NewLIs, SSWeight);
1911 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1912 Slot, 0, false, false, false,
1913 false, vrm, rc, ReMatIds, loopInfo,
1914 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1915 MBBVRegsMap, NewLIs, SSWeight);
1917 IsFirstRange = false;
1920 SSWeight = 0.0f; // Already accounted for when split.
1921 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1925 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1926 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1930 bool NeedStackSlot = false;
1931 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1933 const VNInfo *VNI = *i;
1934 unsigned VN = VNI->id;
1935 unsigned DefIdx = VNI->def;
1937 continue; // Dead val#.
1938 // Is the def for the val# rematerializable?
1939 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1940 ? 0 : getInstructionFromIndex(DefIdx);
1942 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1943 // Remember how to remat the def of this val#.
1944 ReMatOrigDefs[VN] = ReMatDefMI;
1945 // Original def may be modified so we have to make a copy here.
1946 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1947 ClonedMIs.push_back(Clone);
1948 ReMatDefs[VN] = Clone;
1950 bool CanDelete = true;
1951 if (VNI->hasPHIKill) {
1952 // A kill is a phi node, not all of its uses can be rematerialized.
1953 // It must not be deleted.
1955 // Need a stack slot if there is any live range where uses cannot be
1957 NeedStackSlot = true;
1960 ReMatDelete.set(VN);
1962 // Need a stack slot if there is any live range where uses cannot be
1964 NeedStackSlot = true;
1968 // One stack slot per live interval.
1969 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1970 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1971 Slot = vrm.assignVirt2StackSlot(li.reg);
1973 // This case only occurs when the prealloc splitter has already assigned
1974 // a stack slot to this vreg.
1976 Slot = vrm.getStackSlot(li.reg);
1979 // Create new intervals and rewrite defs and uses.
1980 for (LiveInterval::Ranges::const_iterator
1981 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1982 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1983 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1984 bool DefIsReMat = ReMatDefMI != NULL;
1985 bool CanDelete = ReMatDelete[I->valno->id];
1987 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1988 bool isLoad = isLoadSS ||
1989 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1990 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1991 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1992 CanDelete, vrm, rc, ReMatIds, loopInfo,
1993 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1994 MBBVRegsMap, NewLIs, SSWeight);
1997 // Insert spills / restores if we are splitting.
1999 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2003 SmallPtrSet<LiveInterval*, 4> AddedKill;
2004 SmallVector<unsigned, 2> Ops;
2005 if (NeedStackSlot) {
2006 int Id = SpillMBBs.find_first();
2008 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2009 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2010 std::vector<SRInfo> &spills = SpillIdxes[Id];
2011 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2012 int index = spills[i].index;
2013 unsigned VReg = spills[i].vreg;
2014 LiveInterval &nI = getOrCreateInterval(VReg);
2015 bool isReMat = vrm.isReMaterialized(VReg);
2016 MachineInstr *MI = getInstructionFromIndex(index);
2017 bool CanFold = false;
2018 bool FoundUse = false;
2020 if (spills[i].canFold) {
2022 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2023 MachineOperand &MO = MI->getOperand(j);
2024 if (!MO.isReg() || MO.getReg() != VReg)
2031 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2032 RestoreMBBs, RestoreIdxes))) {
2033 // MI has two-address uses of the same register. If the use
2034 // isn't the first and only use in the BB, then we can't fold
2035 // it. FIXME: Move this to rewriteInstructionsForSpills.
2042 // Fold the store into the def if possible.
2043 bool Folded = false;
2044 if (CanFold && !Ops.empty()) {
2045 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2048 // Also folded uses, do not issue a load.
2049 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2050 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2052 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2056 // Otherwise tell the spiller to issue a spill.
2058 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2059 bool isKill = LR->end == getStoreIndex(index);
2060 if (!MI->registerDefIsDead(nI.reg))
2061 // No need to spill a dead def.
2062 vrm.addSpillPoint(VReg, isKill, MI);
2064 AddedKill.insert(&nI);
2067 // Update spill slot weight.
2069 SSWeight += getSpillWeight(true, false, loopDepth);
2071 Id = SpillMBBs.find_next(Id);
2075 int Id = RestoreMBBs.find_first();
2077 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2078 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2080 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2081 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2082 int index = restores[i].index;
2085 unsigned VReg = restores[i].vreg;
2086 LiveInterval &nI = getOrCreateInterval(VReg);
2087 bool isReMat = vrm.isReMaterialized(VReg);
2088 MachineInstr *MI = getInstructionFromIndex(index);
2089 bool CanFold = false;
2091 if (restores[i].canFold) {
2093 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2094 MachineOperand &MO = MI->getOperand(j);
2095 if (!MO.isReg() || MO.getReg() != VReg)
2099 // If this restore were to be folded, it would have been folded
2108 // Fold the load into the use if possible.
2109 bool Folded = false;
2110 if (CanFold && !Ops.empty()) {
2112 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2114 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2116 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2117 // If the rematerializable def is a load, also try to fold it.
2118 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2119 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2120 Ops, isLoadSS, LdSlot, VReg);
2122 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2124 // Re-matting an instruction with virtual register use. Add the
2125 // register as an implicit use on the use MI and update the register
2126 // interval's spill weight to HUGE_VALF to prevent it from being
2128 LiveInterval &ImpLi = getInterval(ImpUse);
2129 ImpLi.weight = HUGE_VALF;
2130 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2135 // If folding is not possible / failed, then tell the spiller to issue a
2136 // load / rematerialization for us.
2138 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2140 vrm.addRestorePoint(VReg, MI);
2142 // Update spill slot weight.
2144 SSWeight += getSpillWeight(false, true, loopDepth);
2146 Id = RestoreMBBs.find_next(Id);
2149 // Finalize intervals: add kills, finalize spill weights, and filter out
2151 std::vector<LiveInterval*> RetNewLIs;
2152 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2153 LiveInterval *LI = NewLIs[i];
2155 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2156 if (!AddedKill.count(LI)) {
2157 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2158 unsigned LastUseIdx = getBaseIndex(LR->end);
2159 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2160 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2161 assert(UseIdx != -1);
2162 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2163 LastUse->getOperand(UseIdx).setIsKill();
2164 vrm.addKillPoint(LI->reg, LastUseIdx);
2167 RetNewLIs.push_back(LI);
2171 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2175 /// hasAllocatableSuperReg - Return true if the specified physical register has
2176 /// any super register that's allocatable.
2177 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk Reg's (zero-terminated) super-register list; a super-register
// qualifies only if it is marked allocatable AND we are tracking a live
// interval for it.
2178 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2179 if (allocatableRegs_[*AS] && hasInterval(*AS))
2184 /// getRepresentativeReg - Find the largest super register of the specified
2185 /// physical register.
2186 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2187 // Find the largest super-register that is allocatable.
// Start from Reg itself; any qualifying super-register found below
// supersedes it as the representative.
2188 unsigned BestReg = Reg;
2189 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2190 unsigned SuperReg = *AS;
// A super-register is the representative when it has an interval but no
// allocatable super-register of its own above it (i.e. it is the top of
// the allocatable chain).
2191 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2199 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2200 /// specified interval that conflicts with the specified physical register.
2201 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2202 unsigned PhysReg) const {
2203 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest allocatable)
// super-register's interval, not PhysReg's own, so aliasing sub/super
// registers are covered by a single query.
2204 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (defs and uses) of li.reg across the function.
2205 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2206 E = mri_->reg_end(); I != E; ++I) {
2207 MachineOperand &O = I.getOperand();
2208 MachineInstr *MI = O.getParent();
2209 unsigned Index = getInstructionIndex(MI);
// If the physreg's interval is live at this instruction, this operand
// conflicts with PhysReg.
2210 if (pli.liveAt(Index))
2213 return NumConflicts;
2216 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2217 /// around all defs and uses of the specified interval. Return true if it
2218 /// was able to cut its interval.
2219 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2220 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest allocatable) super-register so all
// aliasing registers are handled together.
2221 unsigned SpillReg = getRepresentativeReg(PhysReg);
2223 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2224 // If there are registers which alias PhysReg, but which are not a
2225 // sub-register of the chosen representative super register. Assert
2226 // since we can't handle it yet.
2227 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2228 tri_->isSuperRegister(*AS, SpillReg));
2231 LiveInterval &pli = getInterval(SpillReg);
// Each instruction may reference li.reg through several operands; only
// process it once.
2232 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2233 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2234 E = mri_->reg_end(); I != E; ++I) {
2235 MachineOperand &O = I.getOperand();
2236 MachineInstr *MI = O.getParent();
2237 if (SeenMIs.count(MI))
2240 unsigned Index = getInstructionIndex(MI);
2241 if (pli.liveAt(Index)) {
// The physreg is live here: record an emergency spill point and carve
// the instruction's slot range (load slot through store slot) out of
// the physreg's interval.
2242 vrm.addEmergencySpill(SpillReg, MI);
2243 unsigned StartIdx = getLoadIndex(Index);
2244 unsigned EndIdx = getStoreIndex(Index)+1;
2245 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2246 pli.removeRange(StartIdx, EndIdx);
// The range straddles multiple live ranges, so it cannot be cut:
// register allocation has failed. Inline asm with impossible
// constraints is the common culprit, so call it out for the user.
2249 cerr << "Ran out of registers during register allocation!\n";
2250 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2251 cerr << "Please check your inline asm statement for invalid "
2252 << "constraints:\n";
2253 MI->print(cerr.stream(), tm_);
// Also trim any sub-register intervals that are live at this point so
// they stay consistent with the representative register's interval.
2257 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2258 if (!hasInterval(*AS))
2260 LiveInterval &spli = getInterval(*AS);
2261 if (spli.liveAt(Index))
2262 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2269 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2270 MachineInstr* startInst) {
2271 LiveInterval& Interval = getOrCreateInterval(reg);
2272 VNInfo* VN = Interval.getNextValue(
2273 getInstructionIndex(startInst) + InstrSlots::DEF,
2274 startInst, getVNInfoAllocator());
2275 VN->hasPHIKill = true;
2276 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2277 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2278 getMBBEndIdx(startInst->getParent()) + 1, VN);
2279 Interval.addRange(LR);