1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// -disable-rematerialization: disable remat entirely (default: off).
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: allow splitting intervals at basic-block
// boundaries (default: on).
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
// -split-limit: cap on interval splits; initialized to -1 (presumably
// "no limit" — confirm at the use site, which is outside this listing).
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
// -aggressive-remat: enable the extra target-independent remat analysis
// used by isReMaterializable() below.
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// -fast-spill: select the fast spilling code path (default: off).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported via LLVM's -stats machinery.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
58 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
59 STATISTIC(numSplits , "Number of intervals split");
// Unique pass identifier (address of ID is the key) and static registration
// of the pass under the name "liveintervals".
61 char LiveIntervals::ID = 0;
62 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and preserves so
// the PassManager can schedule it.
// NOTE(review): embedded numbering skips 71-72, 75-76 and 79 — additional
// entries and the closing brace are not shown in this listing.
64 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
// Loop info and dominators are left intact by this pass.
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must already have run.
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the LiveInterval objects, the
// VNInfo bump allocator, and MachineInstrs cloned for rematerialization.
// NOTE(review): numbering jumps 84->92 and 95->97 — the loop body that
// deletes each interval, a pop_back, and closing braces are not shown.
81 void LiveIntervals::releaseMemory() {
82 // Free the live intervals themselves.
83 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
84 E = r2iMap_.end(); I != E; ++I)
92 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
93 VNInfoAllocator.Reset();
94 while (!ClonedMIs.empty()) {
95 MachineInstr *MI = ClonedMIs.back();
97 mf_->DeleteMachineInstr(MI);
// computeNumbering - Assign a numeric index to every MachineInstr and
// MachineBasicBlock (InstrSlots::NUM slots per instruction), then remap all
// previously-computed live-range start/end, VNInfo def and kill indices from
// the old numbering to the new one.
// NOTE(review): the embedded numbering has many gaps throughout this
// function (e.g. 103->112, 164->166, 209->211) — clears of the old maps,
// else-branches and closing braces are not visible in this listing.
101 void LiveIntervals::computeNumbering() {
// Snapshot the old index->instruction and index->MBB maps for remapping.
102 Index2MiMap OldI2MI = i2miMap_;
103 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
112 // Number MachineInstrs and MachineBasicBlocks.
113 // Initialize MBB indexes to a sentinel.
114 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
116 unsigned MIIndex = 0;
117 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
119 unsigned StartIdx = MIIndex;
121 // Insert an empty slot at the beginning of each block.
122 MIIndex += InstrSlots::NUM;
123 i2miMap_.push_back(0);
125 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
127 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
128 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Second phase: remap every existing interval to the new numbering.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
// Skip indices whose instruction has been erased; fall back to the
// end of the new numbering if nothing follows.
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Register allocate the whole function
// Caches target/analysis pointers, then (per the statistics updates below)
// computes the intervals. NOTE(review): numbering skips 249, 257-260, 262
// and 266-268 — the `mf_ = &fn` assignment, the calls that actually build
// the numbering/intervals, the loop brace and the `return` are not shown.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record interval counts before and after coalescing for -stats.
261 numIntervals += getNumIntervals();
263 DOUT << "********** INTERVALS **********\n";
264 for (iterator I = begin(), E = end(); I != E; ++I) {
265 I->second->print(DOUT, tri_);
269 numIntervalsAfter += getNumIntervals();
274 /// print - Implement the dump method.
// Prints every live interval followed by each basic block's instructions
// with their assigned indices.
275 void LiveIntervals::print(std::ostream &O, const Module* ) const {
276 O << "********** INTERVALS **********\n";
277 for (const_iterator I = begin(), E = end(); I != E; ++I) {
278 I->second->print(O, tri_);
282 O << "********** MACHINEINSTRS **********\n";
283 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
284 mbbi != mbbe; ++mbbi) {
285 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
286 for (MachineBasicBlock::iterator mii = mbbi->begin(),
287 mie = mbbi->end(); mii != mie; ++mii) {
288 O << getInstructionIndex(mii) << '\t' << *mii;
293 /// conflictsWithPhysRegDef - Returns true if the specified register
294 /// is defined during the duration of the specified interval.
// Walks every instruction index covered by li's ranges and checks whether
// any operand maps (directly or via the VirtRegMap) to a physreg that
// overlaps `reg`. NOTE(review): numbering gaps (306, 311, 314-315, 318,
// 321, 323, 325+) hide several guards, the `return true/false` statements
// and closing braces.
295 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
296 VirtRegMap &vrm, unsigned reg) {
297 for (LiveInterval::Ranges::const_iterator
298 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
299 for (unsigned index = getBaseIndex(I->start),
300 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
301 index += InstrSlots::NUM) {
302 // skip deleted instructions
303 while (index != end && !getInstructionFromIndex(index))
304 index += InstrSlots::NUM;
305 if (index == end) break;
307 MachineInstr *MI = getInstructionFromIndex(index);
// Copies involving li.reg itself are not treated as conflicts.
308 unsigned SrcReg, DstReg;
309 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
310 if (SrcReg == li.reg || DstReg == li.reg)
312 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
313 MachineOperand& mop = MI->getOperand(i);
316 unsigned PhysReg = mop.getReg();
317 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are translated to their assigned physreg.
319 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
320 if (!vrm.hasPhys(PhysReg))
322 PhysReg = vrm.getPhys(PhysReg);
324 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physreg by its target name, a vreg
// as "%reg<N>". NOTE(review): the `else` (original line 336) and closing
// brace are missing from this listing.
333 void LiveIntervals::printRegName(unsigned reg) const {
334 if (TargetRegisterInfo::isPhysicalRegister(reg))
335 cerr << tri_->getName(reg);
337 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register at MIIdx. Handles three cases: the first def of the vreg, a
// two-address redefinition, and a phi-elimination redefinition.
// NOTE(review): this listing has frequent numbering gaps (e.g. 343,
// 350-352, 363, 369, 371, ...) — early returns, else-keywords and closing
// braces are not visible here.
340 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
341 MachineBasicBlock::iterator mi,
342 unsigned MIIdx, MachineOperand& MO,
344 LiveInterval &interval) {
345 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
346 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
348 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
349 DOUT << "is a implicit_def\n";
353 // Virtual registers may be defined multiple times (due to phi
354 // elimination and 2-addr elimination). Much of what we do only has to be
355 // done once for the vreg. We use an empty interval to detect the first
356 // time we see a vreg.
357 if (interval.empty()) {
358 // Get the Idx of the defining instructions.
359 unsigned defIndex = getDefIndex(MIIdx);
360 // Earlyclobbers move back one.
361 if (MO.isEarlyClobber())
362 defIndex = getUseIndex(MIIdx);
// Remember the defining copy instruction (if any) on the value number.
364 MachineInstr *CopyMI = NULL;
365 unsigned SrcReg, DstReg;
366 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
367 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
368 tii_->isMoveInstr(*mi, SrcReg, DstReg))
370 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
372 assert(ValNo->id == 0 && "First value in interval is not 0?");
374 // Loop over all of the blocks that the vreg is defined in. There are
375 // two cases we have to handle here. The most common case is a vreg
376 // whose lifetime is contained within a basic block. In this case there
377 // will be a single kill, in MBB, which comes after the definition.
378 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
379 // FIXME: what about dead vars?
381 if (vi.Kills[0] != mi)
382 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
384 killIdx = defIndex+1;
386 // If the kill happens after the definition, we have an intra-block
388 if (killIdx > defIndex) {
389 assert(vi.AliveBlocks.none() &&
390 "Shouldn't be alive across any blocks!");
391 LiveRange LR(defIndex, killIdx, ValNo);
392 interval.addRange(LR);
393 DOUT << " +" << LR << "\n";
394 interval.addKill(ValNo, killIdx);
399 // The other case we handle is when a virtual register lives to the end
400 // of the defining block, potentially live across some blocks, then is
401 // live into some number of blocks, but gets killed. Start by adding a
402 // range that goes from this definition to the end of the defining block.
403 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
404 DOUT << " +" << NewLR;
405 interval.addRange(NewLR);
407 // Iterate over all of the blocks that the variable is completely
408 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
410 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
411 if (vi.AliveBlocks[i]) {
412 LiveRange LR(getMBBStartIdx(i),
413 getMBBEndIdx(i)+1, // MBB ends at -1.
415 interval.addRange(LR);
420 // Finally, this virtual register is live from the start of any killing
421 // block to the 'use' slot of the killing instruction.
422 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
423 MachineInstr *Kill = vi.Kills[i];
424 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
425 LiveRange LR(getMBBStartIdx(Kill->getParent()),
427 interval.addRange(LR);
428 interval.addKill(ValNo, killIdx);
433 // If this is the second time we see a virtual register definition, it
434 // must be due to phi elimination or two addr elimination. If this is
435 // the result of two address elimination, then the vreg is one of the
436 // def-and-use register operand.
437 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
438 // If this is a two-address definition, then we have already processed
439 // the live range. The only problem is that we didn't realize there
440 // are actually two values in the live interval. Because of this we
441 // need to take the LiveRegion that defines this register and split it
443 assert(interval.containsOneValue());
444 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
445 unsigned RedefIndex = getDefIndex(MIIdx);
446 // Earlyclobbers move back one.
447 if (MO.isEarlyClobber())
448 RedefIndex = getUseIndex(MIIdx);
450 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
451 VNInfo *OldValNo = OldLR->valno;
453 // Delete the initial value, which should be short and continuous,
454 // because the 2-addr copy must be in the same MBB as the redef.
455 interval.removeRange(DefIndex, RedefIndex);
457 // Two-address vregs should always only be redefined once. This means
458 // that at this point, there should be exactly one value number in it.
459 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
461 // The new value number (#1) is defined by the instruction we claimed
463 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
466 // Value#0 is now defined by the 2-addr instruction.
467 OldValNo->def = RedefIndex;
470 // Add the new live interval which replaces the range for the input copy.
471 LiveRange LR(DefIndex, RedefIndex, ValNo);
472 DOUT << " replace range with " << LR;
473 interval.addRange(LR);
474 interval.addKill(ValNo, RedefIndex);
476 // If this redefinition is dead, we need to add a dummy unit live
477 // range covering the def slot.
479 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
482 interval.print(DOUT, tri_);
485 // Otherwise, this must be because of phi elimination. If this is the
486 // first redefinition of the vreg that we have seen, go back and change
487 // the live range in the PHI block to be a different value number.
488 if (interval.containsOneValue()) {
489 assert(vi.Kills.size() == 1 &&
490 "PHI elimination vreg should have one kill, the PHI itself!");
492 // Remove the old range that we now know has an incorrect number.
493 VNInfo *VNI = interval.getValNumInfo(0);
494 MachineInstr *Killer = vi.Kills[0];
495 unsigned Start = getMBBStartIdx(Killer->getParent());
496 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
497 DOUT << " Removing [" << Start << "," << End << "] from: ";
498 interval.print(DOUT, tri_); DOUT << "\n";
499 interval.removeRange(Start, End);
500 VNI->hasPHIKill = true;
501 DOUT << " RESULT: "; interval.print(DOUT, tri_);
503 // Replace the interval with one of a NEW value number. Note that this
504 // value number isn't actually defined by an instruction, weird huh? :)
505 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
506 DOUT << " replace range with " << LR;
507 interval.addRange(LR);
508 interval.addKill(LR.valno, End);
509 DOUT << " RESULT: "; interval.print(DOUT, tri_);
512 // In the case of PHI elimination, each variable definition is only
513 // live until the end of the block. We've already taken care of the
514 // rest of the live range.
515 unsigned defIndex = getDefIndex(MIIdx);
516 // Earlyclobbers move back one.
517 if (MO.isEarlyClobber())
518 defIndex = getUseIndex(MIIdx);
521 MachineInstr *CopyMI = NULL;
522 unsigned SrcReg, DstReg;
523 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
524 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
525 tii_->isMoveInstr(*mi, SrcReg, DstReg))
527 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// The phi-eliminated def is live only to the end of its block.
529 unsigned killIndex = getMBBEndIdx(mbb) + 1;
530 LiveRange LR(defIndex, killIndex, ValNo);
531 interval.addRange(LR);
532 interval.addKill(ValNo, killIndex);
533 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend `interval` for a physreg def at MIIdx.
// Scans forward within the defining block for the kill or redefinition that
// ends the range. NOTE(review): numbering gaps (543-544, 550, 557, 561-566,
// 576, 578, 584-588, 590-591, 594, 596-598, 600) hide several statements,
// gotos/breaks and the `exit:`-style tail of the original function.
541 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
542 MachineBasicBlock::iterator mi,
545 LiveInterval &interval,
546 MachineInstr *CopyMI) {
547 // A physical register cannot be live across basic block, so its
548 // lifetime must end somewhere in its defining basic block.
549 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
551 unsigned baseIndex = MIIdx;
552 unsigned start = getDefIndex(baseIndex);
553 // Earlyclobbers move back one.
554 if (MO.isEarlyClobber())
555 start = getUseIndex(MIIdx);
556 unsigned end = start;
558 // If it is not used after definition, it is considered dead at
559 // the instruction defining it. Hence its interval is:
560 // [defSlot(def), defSlot(def)+1)
567 // If it is not dead on definition, it must be killed by a
568 // subsequent instruction. Hence its interval is:
569 // [defSlot(def), useSlot(kill)+1)
570 baseIndex += InstrSlots::NUM;
571 while (++mi != MBB->end()) {
// Skip index slots whose instruction was deleted.
572 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
573 getInstructionFromIndex(baseIndex) == 0)
574 baseIndex += InstrSlots::NUM;
575 if (mi->killsRegister(interval.reg, tri_)) {
577 end = getUseIndex(baseIndex) + 1;
579 } else if (mi->modifiesRegister(interval.reg, tri_)) {
580 // Another instruction redefines the register before it is ever read.
581 // Then the register is essentially dead at the instruction that defines
582 // it. Hence its interval is:
583 // [defSlot(def), defSlot(def)+1)
589 baseIndex += InstrSlots::NUM;
592 // The only case we should have a dead physreg here without a killing or
593 // instruction where we know it's dead is if it is live-in to the function
595 assert(!CopyMI && "physreg was not killed in defining block!");
599 assert(start < end && "did not find end of interval?");
601 // Already exists? Extend old live interval.
602 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
603 VNInfo *ValNo = (OldLR != interval.end())
604 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
605 LiveRange LR(start, end, ValNo);
606 interval.addRange(LR);
607 interval.addKill(LR.valno, end);
608 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def: virtual registers go to
// handleVirtualRegisterDef; allocatable physregs (and their sub-registers)
// go to handlePhysicalRegisterDef. NOTE(review): numbering skips 613-615
// and 625 — trailing parameters of the signature and the CopyMI assignment
// are not visible.
611 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
612 MachineBasicBlock::iterator MI,
616 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
617 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
618 getOrCreateInterval(MO.getReg()));
619 else if (allocatableRegs_[MO.getReg()]) {
620 MachineInstr *CopyMI = NULL;
621 unsigned SrcReg, DstReg;
622 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
623 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
624 tii_->isMoveInstr(*MI, SrcReg, DstReg))
626 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
627 getOrCreateInterval(MO.getReg()), CopyMI);
628 // Def of a register also defines its sub-registers.
629 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
630 // If MI also modifies the sub-register explicitly, avoid processing it
631 // more than once. Do not pass in TRI here so it checks for exact match.
632 if (!MI->modifiesRegister(*AS))
633 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
634 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a range for a register that is live-in to
// MBB, extending from the block start to its first kill/redef (or through
// the whole block if neither is found). The value number uses the ~0U
// sentinel def, i.e. "not defined by an instruction".
// NOTE(review): numbering gaps (639, 652, 655, 657, 663, 665-667, 672-675,
// 677-679, 681, 683-686) hide breaks, `++mi`, the isAlias branch and braces.
638 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
640 LiveInterval &interval, bool isAlias) {
641 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
643 // Look for kills, if it reaches a def before it's killed, then it shouldn't
644 // be considered a livein.
645 MachineBasicBlock::iterator mi = MBB->begin();
646 unsigned baseIndex = MIIdx;
647 unsigned start = baseIndex;
648 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
649 getInstructionFromIndex(baseIndex) == 0)
650 baseIndex += InstrSlots::NUM;
651 unsigned end = baseIndex;
653 while (mi != MBB->end()) {
654 if (mi->killsRegister(interval.reg, tri_)) {
656 end = getUseIndex(baseIndex) + 1;
658 } else if (mi->modifiesRegister(interval.reg, tri_)) {
659 // Another instruction redefines the register before it is ever read.
660 // Then the register is essentially dead at the instruction that defines
661 // it. Hence its interval is:
662 // [defSlot(def), defSlot(def)+1)
664 end = getDefIndex(start) + 1;
// Advance past deleted-instruction index slots.
668 baseIndex += InstrSlots::NUM;
669 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
670 getInstructionFromIndex(baseIndex) == 0)
671 baseIndex += InstrSlots::NUM;
676 // Live-in register might not be used at all.
680 end = getDefIndex(MIIdx) + 1;
682 DOUT << " live through";
687 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
688 interval.addRange(LR);
689 interval.addKill(LR.valno, end);
690 DOUT << " +" << LR << '\n';
693 /// computeIntervals - computes the live intervals for virtual
694 /// registers. for some ordering of the machine instructions [1,N] a
695 /// live interval is an interval [i, j) where 1 <= i <= j < N for
696 /// which a variable is live
// Per block: first handles live-in registers, then walks each instruction
// and dispatches every register def to handleRegisterDef, advancing the
// index by InstrSlots::NUM per slot. NOTE(review): numbering gaps hide
// several closing braces and the tail of the live-in call at line 719/720.
697 void LiveIntervals::computeIntervals() {
699 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
700 << "********** Function: "
701 << ((Value*)mf_->getFunction())->getName() << '\n';
703 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
705 MachineBasicBlock *MBB = MBBI;
706 // Track the index of the current machine instr.
707 unsigned MIIndex = getMBBStartIdx(MBB);
708 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
710 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
712 // Create intervals for live-ins to this BB first.
713 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
714 LE = MBB->livein_end(); LI != LE; ++LI) {
715 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
716 // Multiple live-ins can alias the same register.
717 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
718 if (!hasInterval(*AS))
719 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
723 // Skip over empty initial indices.
724 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
725 getInstructionFromIndex(MIIndex) == 0)
726 MIIndex += InstrSlots::NUM;
728 for (; MI != miEnd; ++MI) {
729 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse; build an interval for every register def.
732 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
733 MachineOperand &MO = MI->getOperand(i);
734 // handle register defs - build intervals
735 if (MO.isReg() && MO.getReg() && MO.isDef()) {
736 handleRegisterDef(MBB, MI, MIIndex, MO, i);
740 // Skip over the empty slots after each instruction.
741 unsigned Slots = MI->getDesc().getNumDefs();
744 MIIndex += InstrSlots::NUM * Slots;
746 // Skip over empty indices.
747 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
748 getInstructionFromIndex(MIIndex) == 0)
749 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs the blocks whose start index falls in
// [Start, End), using binary search over the sorted Idx2MBBMap.
// NOTE(review): numbering skips 758-759, 761-762 and 764+ — the loop's
// bounds checks, increments and the return value are not shown.
754 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
755 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
756 std::vector<IdxMBBPair>::const_iterator I =
757 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
760 while (I != Idx2MBBMap.end()) {
763 MBBs.push_back(I->second);
// findReachableMBBs - Like findLiveInMBBs, but also includes the successor
// blocks of the last block whose end index exceeds End.
// NOTE(review): numbering skips 774-775, 777-778, 781 and 784+ — pushes
// into MBBs, the break, the loop increment and the return are not shown.
770 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
771 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
772 std::vector<IdxMBBPair>::const_iterator I =
773 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
776 while (I != Idx2MBBMap.end()) {
779 MachineBasicBlock *MBB = I->second;
780 if (getMBBEndIdx(MBB) > End)
782 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
783 SE = MBB->succ_end(); SI != SE; ++SI)
// createInterval - Allocate a new LiveInterval for `reg`, with an initial
// spill weight that depends on whether reg is physical or virtual.
// NOTE(review): original line 793 (the two operands of the ?: expression)
// is missing from this listing.
791 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
792 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
794 return new LiveInterval(reg, Weight);
797 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
798 /// copy field and returns the source register that defines it.
// NOTE(review): numbering skips 800-802 and 809 — the null-copy guard and
// the `return SrcReg;` for the move case are not visible in this listing.
799 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG reads its source from operand 1; INSERT_SUBREG from
// operand 2 (the value being inserted).
803 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
804 return VNI->copy->getOperand(1).getReg();
805 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
806 return VNI->copy->getOperand(2).getReg();
807 unsigned SrcReg, DstReg;
808 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
810 assert(0 && "Unrecognized copy instruction!");
814 //===----------------------------------------------------------------------===//
815 // Register allocator hooks.
818 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
819 /// allow one) virtual register operand, then its uses are implicitly using
820 /// the register. Returns the virtual register.
// NOTE(review): numbering skips 823, 827, 830, 832 and 834+ — the RegFound
// tracking variable, `continue`s, part of the assert, and the returns are
// not visible in this listing.
821 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
822 MachineInstr *MI) const {
824 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
825 MachineOperand &MO = MI->getOperand(i);
// Skip non-register operands and the interval's own register.
826 if (!MO.isReg() || !MO.isUse())
828 unsigned Reg = MO.getReg();
829 if (Reg == 0 || Reg == li.reg)
831 // FIXME: For now, only remat MI with at most one register operand.
833 "Can't rematerialize instruction with multiple register operand!");
842 /// isValNoAvailableAt - Return true if the val# of the specified interval
843 /// which reaches the given instruction also reaches the specified use index.
// i.e. the live range containing UseIdx must exist and carry the same value
// number as the range containing MI's own index.
844 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
845 unsigned UseIdx) const {
846 unsigned Index = getInstructionIndex(MI);
847 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
848 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
849 return UI != li.end() && UI->valno == ValNo;
852 /// isReMaterializable - Returns true if the definition MI of the specified
853 /// val# of the specified interval is re-materializable.
// Accepts IMPLICIT_DEFs and loads from immutable stack slots outright; for
// everything else either requires the target to mark the instruction
// trivially rematerializable, or (under -aggressive-remat) performs the
// target-independent memory/operand analysis below. Finally rejects remat
// when the single implicit-use register is unavailable at a use or is about
// to be spilled. NOTE(review): numbering gaps throughout (857-860, 862-864,
// 869-872, 875, 879-880, ...) hide returns, `continue`s and closing braces.
854 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
855 const VNInfo *ValNo, MachineInstr *MI,
856 SmallVectorImpl<LiveInterval*> &SpillIs,
861 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
865 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
866 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
867 // FIXME: Let target specific isReallyTriviallyReMaterializable determine
868 // this but remember this is not safe to fold into a two-address
870 // This is a load from fixed stack slot. It can be rematerialized.
873 // If the target-specific rules don't identify an instruction as
874 // being trivially rematerializable, use some target-independent
876 if (!MI->getDesc().isRematerializable() ||
877 !tii_->isTriviallyReMaterializable(MI)) {
878 if (!EnableAggressiveRemat)
881 // If the instruction accesses memory but the memoperands have been lost,
882 // we can't analyze it.
883 const TargetInstrDesc &TID = MI->getDesc();
884 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
887 // Avoid instructions obviously unsafe for remat.
888 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
891 // If the instruction accesses memory and the memory could be non-constant,
892 // assume the instruction is not rematerializable.
893 for (std::list<MachineMemOperand>::const_iterator
894 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
895 const MachineMemOperand &MMO = *I;
896 if (MMO.isVolatile() || MMO.isStore())
898 const Value *V = MMO.getValue();
// Pseudo source values (stack/constant pool/etc.) have their own
// constness query; otherwise ask alias analysis.
901 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
902 if (!PSV->isConstant(mf_->getFrameInfo()))
904 } else if (!aa_->pointsToConstantMemory(V))
908 // If any of the registers accessed are non-constant, conservatively assume
909 // the instruction is not rematerializable.
911 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
912 const MachineOperand &MO = MI->getOperand(i);
914 unsigned Reg = MO.getReg();
917 if (TargetRegisterInfo::isPhysicalRegister(Reg))
920 // Only allow one def, and that in the first operand.
921 if (MO.isDef() != (i == 0))
924 // Only allow constant-valued registers.
925 bool IsLiveIn = mri_->isLiveIn(Reg);
926 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
929 // For the def, it should be the only def.
930 if (MO.isDef() && (next(I) != E || IsLiveIn))
934 // Only allow one other register use, as that's all the
935 // remat mechanisms support currently.
939 else if (Reg != ImpUse)
942 // For uses, there should be only one associated def.
943 if (I != E && (next(I) != E || IsLiveIn))
950 unsigned ImpUse = getReMatImplicitUse(li, MI);
952 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every use of li.reg reached by ValNo must also see the implicit-use
// register's value; otherwise remat at that point would read a stale value.
953 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
954 re = mri_->use_end(); ri != re; ++ri) {
955 MachineInstr *UseMI = &*ri;
956 unsigned UseIdx = getInstructionIndex(UseMI);
957 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
959 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
963 // If a register operand of the re-materialized instruction is going to
964 // be spilled next, then it's not legal to re-materialize this instruction.
965 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
966 if (ImpUse == SpillIs[i]->reg)
972 /// isReMaterializable - Returns true if the definition MI of the specified
973 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with dummy out-params.
// NOTE(review): original line 977 (presumably the declaration of Dummy2)
// is missing from this listing, as is the closing brace.
974 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
975 const VNInfo *ValNo, MachineInstr *MI) {
976 SmallVector<LiveInterval*, 4> Dummy1;
978 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
981 /// isReMaterializable - Returns true if every definition of MI of every
982 /// val# of the specified interval is re-materializable.
// Also accumulates into isLoad whether any rematerializable def is a load.
// NOTE(review): numbering gaps (985-986, 988, 991, 994-995, 998, 1000,
// 1002+) hide the isLoad parameter, the dead-val# test, returns and braces.
983 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
984 SmallVectorImpl<LiveInterval*> &SpillIs,
987 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
989 const VNInfo *VNI = *i;
990 unsigned DefIdx = VNI->def;
992 continue; // Dead val#.
993 // Is the def for the val# rematerializable?
996 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
997 bool DefIsLoad = false;
999 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1001 isLoad |= DefIsLoad;
1006 /// FilterFoldedOps - Filter out two-address use operands. Return
1007 /// true if it finds any issue with the operands that ought to prevent
// folding; the surviving operand indices are collected into FoldOps and
// the mod/ref kind accumulated into MRInfo.
// NOTE(review): numbering gaps (1008, 1011, 1014-1015, 1020-1022, 1024,
// 1029-1030, 1032, 1034+) hide the MRInfo parameter, subreg guard,
// else-branches and the return statements.
1009 static bool FilterFoldedOps(MachineInstr *MI,
1010 SmallVector<unsigned, 2> &Ops,
1012 SmallVector<unsigned, 2> &FoldOps) {
1013 const TargetInstrDesc &TID = MI->getDesc();
1016 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1017 unsigned OpIdx = Ops[i];
1018 MachineOperand &MO = MI->getOperand(OpIdx);
1019 // FIXME: fold subreg use.
1023 MRInfo |= (unsigned)VirtRegMap::isMod;
1025 // Filter out two-address use operand(s).
1026 if (!MO.isImplicit() &&
1027 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1028 MRInfo = VirtRegMap::isModRef;
1031 MRInfo |= (unsigned)VirtRegMap::isRef;
1033 FoldOps.push_back(OpIdx);
// NOTE(review): this listing has dropped lines (embedded numbering jumps
// 1041->1043, 1052->1057, 1061->1064, 1066->1069, 1070->1072, 1082->1084 and
// past 1086): the early 'return true;' after erasing an IMPLICIT_DEF, the
// 'return false;' bail-outs, the 'if (fmi)' success guard around the tail,
// the mi2iMap_ erase, and the closing braces/returns are missing here.
// Logic is too gappy to rewrite safely; only comments are added below.
1039 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1040 /// slot / to reg or any rematerialized load into ith operand of specified
1041 /// MI. If it is successul, MI is updated with the newly created MI and
// (continuation of the doc comment — "returns true" — was dropped.)
1043 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1044 VirtRegMap &vrm, MachineInstr *DefMI,
1046 SmallVector<unsigned, 2> &Ops,
1047 bool isSS, int Slot, unsigned Reg) {
1048 // If it is an implicit def instruction, just delete it.
1049 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1050 RemoveMachineInstrFromMaps(MI);
1051 vrm.RemoveMachineInstrFromMaps(MI);
1052 MI->eraseFromParent();
// (the 'return true;' ending this branch was dropped by the listing.)
1057 // Filter the list of operand indexes that are to be folded. Abort if
1058 // any operand will prevent folding.
1059 unsigned MRInfo = 0;
1060 SmallVector<unsigned, 2> FoldOps;
1061 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1064 // The only time it's safe to fold into a two address instruction is when
1065 // it's folding reload and spill from / into a spill stack slot.
1066 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to produce the folded instruction: from a stack slot when
// isSS, otherwise by folding the rematerialized def DefMI into MI.
1069 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1070 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
// (the 'if (fmi) {' guard for the success path below was dropped.)
1072 // Remember this instruction uses the spill slot.
1073 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1075 // Attempt to fold the memory reference into the instruction. If
1076 // we can do this, we don't need to insert spill code.
1077 MachineBasicBlock &MBB = *MI->getParent();
1078 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1079 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all spill / restore / emergency-spill bookkeeping from the old
// instruction to the folded replacement.
1080 vrm.transferSpillPts(MI, fmi);
1081 vrm.transferRestorePts(MI, fmi);
1082 vrm.transferEmergencySpills(MI, fmi);
// Rewire the index<->instruction maps so fmi takes over MI's slot index.
1084 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1085 mi2iMap_[fmi] = InstrIdx;
1086 MI = MBB.insert(MBB.erase(MI), fmi);
1093 /// canFoldMemoryOperand - Returns true if the specified load / store
1094 /// folding is possible.
1095 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1096 SmallVector<unsigned, 2> &Ops,
1098 // Filter the list of operand indexes that are to be folded. Abort if
1099 // any operand will prevent folding.
1100 unsigned MRInfo = 0;
1101 SmallVector<unsigned, 2> FoldOps;
1102 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1105 // It's only legal to remat for a use, not a def.
1106 if (ReMat && (MRInfo & VirtRegMap::isMod))
1109 return tii_->canFoldMemoryOperand(MI, FoldOps);
1112 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1113 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1114 for (LiveInterval::Ranges::const_iterator
1115 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1116 std::vector<IdxMBBPair>::const_iterator II =
1117 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1118 if (II == Idx2MBBMap.end())
1120 if (I->end > II->first) // crossing a MBB.
1122 MBBs.insert(II->second);
1123 if (MBBs.size() > 1)
1129 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1130 /// interval on to-be re-materialized operands of MI) with new register.
1131 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1132 MachineInstr *MI, unsigned NewVReg,
1134 // There is an implicit use. That means one of the other operand is
1135 // being remat'ed and the remat'ed instruction has li.reg as an
1136 // use operand. Make sure we rewrite that as well.
1137 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1138 MachineOperand &MO = MI->getOperand(i);
1141 unsigned Reg = MO.getReg();
1142 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1144 if (!vrm.isReMaterialized(Reg))
1146 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1147 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1149 UseMO->setReg(NewVReg);
// NOTE(review): large portions of this function were dropped by the listing
// (embedded numbering jumps throughout, e.g. 1170->1172, 1194->1198,
// 1202->1208, 1263->1269, 1286->1290, 1333->1336, 1360->1366): the
// 'RestartInstruction:' label, operand-kind guards, else-branches, returns
// and closing braces are missing. The control flow is too intricate and
// too order-dependent to reconstruct safely, so only comments are added.
1153 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1154 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1155 bool LiveIntervals::
1156 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1157 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1158 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1159 unsigned Slot, int LdSlot,
1160 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1162 const TargetRegisterClass* rc,
1163 SmallVector<int, 4> &ReMatIds,
1164 const MachineLoopInfo *loopInfo,
1165 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1166 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1167 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1168 MachineBasicBlock *MBB = MI->getParent();
1169 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1170 bool CanFold = false;
// (the 'RestartInstruction:' label targeted by the goto below was dropped.)
1172 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1173 MachineOperand& mop = MI->getOperand(i);
// Skip operands that are not li.reg; only rewrite the spilled register.
1176 unsigned Reg = mop.getReg();
1177 unsigned RegI = Reg;
1178 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1183 bool TryFold = !DefIsReMat;
1184 bool FoldSS = true; // Default behavior unless it's a remat.
1185 int FoldSlot = Slot;
// When the def is rematerializable, decide between deleting the original
// def outright and folding the remat'ed load.
1187 // If this is the rematerializable definition MI itself and
1188 // all of its uses are rematerialized, simply delete it.
1189 if (MI == ReMatOrigDefMI && CanDelete) {
1190 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1192 RemoveMachineInstrFromMaps(MI);
1193 vrm.RemoveMachineInstrFromMaps(MI);
1194 MI->eraseFromParent();
1198 // If def for this use can't be rematerialized, then try folding.
1199 // If def is rematerializable and it's a load, also try folding.
1200 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1202 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1208 // Scan all of the operands of this instruction rewriting operands
1209 // to use NewVReg instead of li.reg as appropriate. We do this for
1212 // 1. If the instr reads the same spilled vreg multiple times, we
1213 // want to reuse the NewVReg.
1214 // 2. If the instr is a two-addr instruction, we are required to
1215 // keep the src/dst regs pinned.
1217 // Keep track of whether we replace a use and/or def so that we can
1218 // create the spill interval with the appropriate range.
1220 HasUse = mop.isUse();
1221 HasDef = mop.isDef();
1222 SmallVector<unsigned, 2> Ops;
// Collect the other operands of MI that also reference li.reg so they all
// get rewritten to the same NewVReg.
1224 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1225 const MachineOperand &MOj = MI->getOperand(j);
1228 unsigned RegJ = MOj.getReg();
1229 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1233 HasUse |= MOj.isUse();
1234 HasDef |= MOj.isDef();
1238 if (HasUse && !li.liveAt(getUseIndex(index)))
1239 // Must be defined by an implicit def. It should not be spilled. Note,
1240 // this is for correctness reason. e.g.
1241 // 8 %reg1024<def> = IMPLICIT_DEF
1242 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1243 // The live range [12, 14) are not part of the r1024 live interval since
1244 // it's defined by an implicit def. It will not conflicts with live
1245 // interval of r1025. Now suppose both registers are spilled, you can
1246 // easily see a situation where both registers are reloaded before
1247 // the INSERT_SUBREG and both target registers that would overlap.
1250 // Update stack slot spill weight if we are splitting.
1251 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1255 // Create a new virtual register for the spill interval.
1256 // Create the new register now so we can map the fold instruction
1257 // to the new register so when it is unfolded we get the correct
1259 bool CreatedNewVReg = false;
1261 NewVReg = mri_->createVirtualRegister(rc);
1263 CreatedNewVReg = true;
1269 // Do not fold load / store here if we are splitting. We'll find an
1270 // optimal point to insert a load / store later.
1272 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1273 Ops, FoldSS, FoldSlot, NewVReg)) {
1274 // Folding the load/store can completely change the instruction in
1275 // unpredictable ways, rescan it from the beginning.
1278 // We need to give the new vreg the same stack slot as the
1279 // spilled interval.
1280 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding deleted MI entirely we are done with this instruction;
// otherwise rescan it from the top (target of the dropped label).
1286 if (isRemoved(MI)) {
1290 goto RestartInstruction;
1293 // We'll try to fold it later if it's profitable.
1294 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand and every collected sibling operand to NewVReg.
1298 mop.setReg(NewVReg);
1299 if (mop.isImplicit())
1300 rewriteImplicitOps(li, MI, NewVReg, vrm);
1302 // Reuse NewVReg for other reads.
1303 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1304 MachineOperand &mopj = MI->getOperand(Ops[j]);
1305 mopj.setReg(NewVReg);
1306 if (mopj.isImplicit())
1307 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot assignments for the freshly created vreg.
1310 if (CreatedNewVReg) {
1312 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1313 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1314 // Each valnum may have its own remat id.
1315 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1317 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1319 if (!CanDelete || (HasUse && HasDef)) {
1320 // If this is a two-addr instruction then its use operands are
1321 // rematerializable but its def is not. It should be assigned a
1323 vrm.assignVirt2StackSlot(NewVReg, Slot);
1326 vrm.assignVirt2StackSlot(NewVReg, Slot);
1328 } else if (HasUse && HasDef &&
1329 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1330 // If this interval hasn't been assigned a stack slot (because earlier
1331 // def is a deleted remat def), do it now.
1332 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1333 vrm.assignVirt2StackSlot(NewVReg, Slot);
1336 // Re-matting an instruction with virtual register use. Add the
1337 // register as an implicit use on the use MI.
1338 if (DefIsReMat && ImpUse)
1339 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1341 // create a new register interval for this spill / remat.
1342 LiveInterval &nI = getOrCreateInterval(NewVReg);
1343 if (CreatedNewVReg) {
1344 NewLIs.push_back(&nI);
1345 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1347 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow the new interval to cover this def and/or use of NewVReg.
1351 if (CreatedNewVReg) {
1352 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1353 nI.getNextValue(~0U, 0, VNInfoAllocator));
1357 // Extend the split live interval to this def / use.
1358 unsigned End = getUseIndex(index)+1;
1359 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1360 nI.getValNumInfo(nI.getNumValNums()-1));
1366 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1367 nI.getNextValue(~0U, 0, VNInfoAllocator));
1372 DOUT << "\t\t\t\tAdded new interval: ";
1373 nI.print(DOUT, tri_);
1378 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1380 MachineBasicBlock *MBB, unsigned Idx) const {
1381 unsigned End = getMBBEndIdx(MBB);
1382 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1383 unsigned KillIdx = VNI->kills[j];
1384 if (KillIdx > Idx && KillIdx < End)
1390 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1391 /// during spilling.
1393 struct RewriteInfo {
1398 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1399 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1402 struct RewriteInfoCompare {
1403 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1404 return LHS.Index < RHS.Index;
// NOTE(review): many lines of this function were dropped by the listing
// (embedded numbering jumps e.g. 1453->1455, 1492->1499, 1502->1508,
// 1513->1520, 1536->1540, 1578->1585, 1603->1605, 1613->1616): loop
// closers, else-branches, 'continue's and several conditions are missing.
// Too order-dependent to reconstruct safely; comments only.
1409 void LiveIntervals::
1410 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1411 LiveInterval::Ranges::const_iterator &I,
1412 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1413 unsigned Slot, int LdSlot,
1414 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1416 const TargetRegisterClass* rc,
1417 SmallVector<int, 4> &ReMatIds,
1418 const MachineLoopInfo *loopInfo,
1419 BitVector &SpillMBBs,
1420 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1421 BitVector &RestoreMBBs,
1422 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1423 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1424 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1425 bool AllCanFold = true;
1426 unsigned NewVReg = 0;
1427 unsigned start = getBaseIndex(I->start);
1428 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1430 // First collect all the def / use in this live range that will be rewritten.
1431 // Make sure they are sorted according to instruction index.
1432 std::vector<RewriteInfo> RewriteMIs;
1433 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1434 re = mri_->reg_end(); ri != re; ) {
1435 MachineInstr *MI = &*ri;
1436 MachineOperand &O = ri.getOperand();
1438 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1439 unsigned index = getInstructionIndex(MI);
1440 if (index < start || index >= end)
1442 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1443 // Must be defined by an implicit def. It should not be spilled. Note,
1444 // this is for correctness reason. e.g.
1445 // 8 %reg1024<def> = IMPLICIT_DEF
1446 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1447 // The live range [12, 14) are not part of the r1024 live interval since
1448 // it's defined by an implicit def. It will not conflicts with live
1449 // interval of r1025. Now suppose both registers are spilled, you can
1450 // easily see a situation where both registers are reloaded before
1451 // the INSERT_SUBREG and both target registers that would overlap.
1453 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1455 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1457 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1458 // Now rewrite the defs and uses.
1459 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1460 RewriteInfo &rwi = RewriteMIs[i];
1462 unsigned index = rwi.Index;
1463 bool MIHasUse = rwi.HasUse;
1464 bool MIHasDef = rwi.HasDef;
1465 MachineInstr *MI = rwi.MI;
1466 // If MI def and/or use the same register multiple times, then there
1467 // are multiple entries.
1468 unsigned NumUses = MIHasUse;
// Coalesce consecutive entries for the same instruction, counting uses
// and merging the def flag.
1469 while (i != e && RewriteMIs[i].MI == MI) {
1470 assert(RewriteMIs[i].Index == index);
1471 bool isUse = RewriteMIs[i].HasUse;
1472 if (isUse) ++NumUses;
1474 MIHasDef |= RewriteMIs[i].HasDef;
1477 MachineBasicBlock *MBB = MI->getParent();
1479 if (ImpUse && MI != ReMatDefMI) {
1480 // Re-matting an instruction with virtual register use. Update the
1481 // register interval's spill weight to HUGE_VALF to prevent it from
1483 LiveInterval &ImpLi = getInterval(ImpUse);
1484 ImpLi.weight = HUGE_VALF;
// When splitting, decide whether to reuse the vreg already created for
// this MBB or to start a fresh interval.
1487 unsigned MBBId = MBB->getNumber();
1488 unsigned ThisVReg = 0;
1490 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1491 if (NVI != MBBVRegsMap.end()) {
1492 ThisVReg = NVI->second;
1499 // It's better to start a new interval to avoid artifically
1500 // extend the new interval.
1501 if (MIHasDef && !MIHasUse) {
1502 MBBVRegsMap.erase(MBB->getNumber());
1508 bool IsNew = ThisVReg == 0;
1510 // This ends the previous live interval. If all of its def / use
1511 // can be folded, give it a low spill weight.
1512 if (NewVReg && TrySplit && AllCanFold) {
1513 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction's operands; HasDef/HasUse report what was
// actually replaced so the bookkeeping below can build the right ranges.
1520 bool HasDef = false;
1521 bool HasUse = false;
1522 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1523 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1524 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1525 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1526 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1527 if (!HasDef && !HasUse)
1530 AllCanFold &= CanFold;
1532 // Update weight of spill interval.
1533 LiveInterval &nI = getOrCreateInterval(NewVReg);
1535 // The spill weight is now infinity as it cannot be spilled again.
1536 nI.weight = HUGE_VALF;
1540 // Keep track of the last def and first use in each MBB.
1542 if (MI != ReMatOrigDefMI || !CanDelete) {
1543 bool HasKill = false;
1545 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1547 // If this is a two-address code, then this index starts a new VNInfo.
1548 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1550 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or merge) a potential spill point for this MBB.
1552 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1553 SpillIdxes.find(MBBId);
1555 if (SII == SpillIdxes.end()) {
1556 std::vector<SRInfo> S;
1557 S.push_back(SRInfo(index, NewVReg, true));
1558 SpillIdxes.insert(std::make_pair(MBBId, S));
1559 } else if (SII->second.back().vreg != NewVReg) {
1560 SII->second.push_back(SRInfo(index, NewVReg, true));
1561 } else if ((int)index > SII->second.back().index) {
1562 // If there is an earlier def and this is a two-address
1563 // instruction, then it's not possible to fold the store (which
1564 // would also fold the load).
1565 SRInfo &Info = SII->second.back();
1567 Info.canFold = !HasUse;
1569 SpillMBBs.set(MBBId);
1570 } else if (SII != SpillIdxes.end() &&
1571 SII->second.back().vreg == NewVReg &&
1572 (int)index > SII->second.back().index) {
1573 // There is an earlier def that's not killed (must be two-address).
1574 // The spill is no longer needed.
1575 SII->second.pop_back();
1576 if (SII->second.empty()) {
1577 SpillIdxes.erase(MBBId);
1578 SpillMBBs.reset(MBBId);
// Record (or invalidate) a potential restore point for this MBB.
1585 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1586 SpillIdxes.find(MBBId);
1587 if (SII != SpillIdxes.end() &&
1588 SII->second.back().vreg == NewVReg &&
1589 (int)index > SII->second.back().index)
1590 // Use(s) following the last def, it's not safe to fold the spill.
1591 SII->second.back().canFold = false;
1592 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1593 RestoreIdxes.find(MBBId);
1594 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1595 // If we are splitting live intervals, only fold if it's the first
1596 // use and there isn't another use later in the MBB.
1597 RII->second.back().canFold = false;
1599 // Only need a reload if there isn't an earlier def / use.
1600 if (RII == RestoreIdxes.end()) {
1601 std::vector<SRInfo> Infos;
1602 Infos.push_back(SRInfo(index, NewVReg, true));
1603 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1605 RII->second.push_back(SRInfo(index, NewVReg, true));
1607 RestoreMBBs.set(MBBId);
1611 // Update spill weight.
1612 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1613 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1616 if (NewVReg && TrySplit && AllCanFold) {
1617 // If all of its def / use can be folded, give it a low spill weight.
1618 LiveInterval &nI = getOrCreateInterval(NewVReg);
1623 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1624 BitVector &RestoreMBBs,
1625 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1626 if (!RestoreMBBs[Id])
1628 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1629 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1630 if (Restores[i].index == index &&
1631 Restores[i].vreg == vr &&
1632 Restores[i].canFold)
1637 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1638 BitVector &RestoreMBBs,
1639 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1640 if (!RestoreMBBs[Id])
1642 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1643 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1644 if (Restores[i].index == index && Restores[i].vreg)
1645 Restores[i].index = -1;
1648 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1649 /// spilled and create empty intervals for their uses.
1651 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1652 const TargetRegisterClass* rc,
1653 std::vector<LiveInterval*> &NewLIs) {
1654 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1655 re = mri_->reg_end(); ri != re; ) {
1656 MachineOperand &O = ri.getOperand();
1657 MachineInstr *MI = &*ri;
1660 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1661 "Register def was not rewritten?");
1662 RemoveMachineInstrFromMaps(MI);
1663 vrm.RemoveMachineInstrFromMaps(MI);
1664 MI->eraseFromParent();
1666 // This must be an use of an implicit_def so it's not part of the live
1667 // interval. Create a new empty live interval for it.
1668 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1669 unsigned NewVReg = mri_->createVirtualRegister(rc);
1671 vrm.setIsImplicitlyDefined(NewVReg);
1672 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1673 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1674 MachineOperand &MO = MI->getOperand(i);
1675 if (MO.isReg() && MO.getReg() == li.reg)
1684 bool operator()(LiveInterval* A, LiveInterval* B) {
1685 return A->beginNumber() < B->beginNumber();
// NOTE(review): lines were dropped by the listing (embedded numbering jumps
// e.g. 1694->1696, 1719->1721, 1724->1727, 1753->1756, 1772->1775): the
// else-branches choosing between restore/spill ranges, the HasUse/HasDef
// conditionals guiding the SSWeight updates, and the closing braces are
// missing. Branch structure cannot be reconstructed safely; comments only.
1690 std::vector<LiveInterval*> LiveIntervals::
1691 addIntervalsForSpillsFast(const LiveInterval &li,
1692 const MachineLoopInfo *loopInfo,
1693 VirtRegMap &vrm, float& SSWeight) {
// One stack slot is shared by every new interval created for this spill.
1694 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1696 std::vector<LiveInterval*> added;
1698 assert(li.weight != HUGE_VALF &&
1699 "attempt to spill already spilled interval!");
1701 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1705 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Walk every instruction referencing li.reg; the iterator is re-fetched at
// the bottom of the loop because the instruction may be mutated/erased.
1709 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1710 while (RI != mri_->reg_end()) {
1711 MachineInstr* MI = &*RI;
1713 SmallVector<unsigned, 2> Indices;
1714 bool HasUse = false;
1715 bool HasDef = false;
// Collect every operand of MI that references li.reg.
1717 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1718 MachineOperand& mop = MI->getOperand(i);
1719 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1721 HasUse |= MI->getOperand(i).isUse();
1722 HasDef |= MI->getOperand(i).isDef();
1724 Indices.push_back(i);
// Try folding the stack slot directly into MI; only on failure do we
// create a new vreg plus explicit reload/spill points.
1727 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1728 Indices, true, slot, li.reg)) {
1729 unsigned NewVReg = mri_->createVirtualRegister(rc);
1731 vrm.assignVirt2StackSlot(NewVReg, slot);
1733 // create a new register for this spill
1734 LiveInterval &nI = getOrCreateInterval(NewVReg);
1736 // the spill weight is now infinity as it
1737 // cannot be spilled again
1738 nI.weight = HUGE_VALF;
1740 // Rewrite register operands to use the new vreg.
1741 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1742 E = Indices.end(); I != E; ++I) {
1743 MI->getOperand(*I).setReg(NewVReg);
1745 if (MI->getOperand(*I).isUse())
1746 MI->getOperand(*I).setIsKill(true);
1749 // Fill in the new live interval.
1750 unsigned index = getInstructionIndex(MI);
// Use path: range covering the reload before MI, plus a restore point.
1752 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1753 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1756 vrm.addRestorePoint(NewVReg, MI);
// Def path: range covering the store after MI, plus a spill point.
1759 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1760 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1763 vrm.addSpillPoint(NewVReg, true, MI);
1766 added.push_back(&nI);
1768 DOUT << "\t\t\t\tadded new interval: ";
// Account for the reload/spill traffic in the stack slot weight, scaled
// by loop depth (use+def / use-only / def-only variants).
1772 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1775 SSWeight += getSpillWeight(true, true, loopDepth);
1777 SSWeight += getSpillWeight(false, true, loopDepth);
1779 SSWeight += getSpillWeight(true, false, loopDepth);
1783 RI = mri_->reg_begin(li.reg);
1786 // Clients expect the new intervals to be returned in sorted order.
1787 std::sort(added.begin(), added.end(), LISorter());
1792 std::vector<LiveInterval*> LiveIntervals::
1793 addIntervalsForSpills(const LiveInterval &li,
1794 SmallVectorImpl<LiveInterval*> &SpillIs,
1795 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1798 if (EnableFastSpilling)
1799 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1801 assert(li.weight != HUGE_VALF &&
1802 "attempt to spill already spilled interval!");
1804 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1805 li.print(DOUT, tri_);
1808 // Spill slot weight.
1811 // Each bit specify whether it a spill is required in the MBB.
1812 BitVector SpillMBBs(mf_->getNumBlockIDs());
1813 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1814 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1815 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1816 DenseMap<unsigned,unsigned> MBBVRegsMap;
1817 std::vector<LiveInterval*> NewLIs;
1818 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1820 unsigned NumValNums = li.getNumValNums();
1821 SmallVector<MachineInstr*, 4> ReMatDefs;
1822 ReMatDefs.resize(NumValNums, NULL);
1823 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1824 ReMatOrigDefs.resize(NumValNums, NULL);
1825 SmallVector<int, 4> ReMatIds;
1826 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1827 BitVector ReMatDelete(NumValNums);
1828 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1830 // Spilling a split live interval. It cannot be split any further. Also,
1831 // it's also guaranteed to be a single val# / range interval.
1832 if (vrm.getPreSplitReg(li.reg)) {
1833 vrm.setIsSplitFromReg(li.reg, 0);
1834 // Unset the split kill marker on the last use.
1835 unsigned KillIdx = vrm.getKillPoint(li.reg);
1837 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1838 assert(KillMI && "Last use disappeared?");
1839 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1840 assert(KillOp != -1 && "Last use disappeared?");
1841 KillMI->getOperand(KillOp).setIsKill(false);
1843 vrm.removeKillPoint(li.reg);
1844 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1845 Slot = vrm.getStackSlot(li.reg);
1846 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1847 MachineInstr *ReMatDefMI = DefIsReMat ?
1848 vrm.getReMaterializedMI(li.reg) : NULL;
1850 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1851 bool isLoad = isLoadSS ||
1852 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1853 bool IsFirstRange = true;
1854 for (LiveInterval::Ranges::const_iterator
1855 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1856 // If this is a split live interval with multiple ranges, it means there
1857 // are two-address instructions that re-defined the value. Only the
1858 // first def can be rematerialized!
1860 // Note ReMatOrigDefMI has already been deleted.
1861 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1862 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1863 false, vrm, rc, ReMatIds, loopInfo,
1864 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1865 MBBVRegsMap, NewLIs, SSWeight);
1867 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1868 Slot, 0, false, false, false,
1869 false, vrm, rc, ReMatIds, loopInfo,
1870 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1871 MBBVRegsMap, NewLIs, SSWeight);
1873 IsFirstRange = false;
1876 SSWeight = 0.0f; // Already accounted for when split.
1877 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1881 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1882 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1886 bool NeedStackSlot = false;
1887 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1889 const VNInfo *VNI = *i;
1890 unsigned VN = VNI->id;
1891 unsigned DefIdx = VNI->def;
1893 continue; // Dead val#.
1894 // Is the def for the val# rematerializable?
1895 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1896 ? 0 : getInstructionFromIndex(DefIdx);
1898 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1899 // Remember how to remat the def of this val#.
1900 ReMatOrigDefs[VN] = ReMatDefMI;
1901 // Original def may be modified so we have to make a copy here.
1902 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1903 ClonedMIs.push_back(Clone);
1904 ReMatDefs[VN] = Clone;
1906 bool CanDelete = true;
1907 if (VNI->hasPHIKill) {
1908 // A kill is a phi node, not all of its uses can be rematerialized.
1909 // It must not be deleted.
1911 // Need a stack slot if there is any live range where uses cannot be
1913 NeedStackSlot = true;
1916 ReMatDelete.set(VN);
1918 // Need a stack slot if there is any live range where uses cannot be
1920 NeedStackSlot = true;
1924 // One stack slot per live interval.
1925 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1926 Slot = vrm.assignVirt2StackSlot(li.reg);
1928 // Create new intervals and rewrite defs and uses.
1929 for (LiveInterval::Ranges::const_iterator
1930 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1931 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1932 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1933 bool DefIsReMat = ReMatDefMI != NULL;
1934 bool CanDelete = ReMatDelete[I->valno->id];
1936 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1937 bool isLoad = isLoadSS ||
1938 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1939 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1940 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1941 CanDelete, vrm, rc, ReMatIds, loopInfo,
1942 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1943 MBBVRegsMap, NewLIs, SSWeight);
1946 // Insert spills / restores if we are splitting.
1948 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1952 SmallPtrSet<LiveInterval*, 4> AddedKill;
1953 SmallVector<unsigned, 2> Ops;
1954 if (NeedStackSlot) {
1955 int Id = SpillMBBs.find_first();
1957 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1958 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1959 std::vector<SRInfo> &spills = SpillIdxes[Id];
1960 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1961 int index = spills[i].index;
1962 unsigned VReg = spills[i].vreg;
1963 LiveInterval &nI = getOrCreateInterval(VReg);
1964 bool isReMat = vrm.isReMaterialized(VReg);
1965 MachineInstr *MI = getInstructionFromIndex(index);
1966 bool CanFold = false;
1967 bool FoundUse = false;
1969 if (spills[i].canFold) {
1971 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1972 MachineOperand &MO = MI->getOperand(j);
1973 if (!MO.isReg() || MO.getReg() != VReg)
1980 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1981 RestoreMBBs, RestoreIdxes))) {
1982 // MI has two-address uses of the same register. If the use
1983 // isn't the first and only use in the BB, then we can't fold
1984 // it. FIXME: Move this to rewriteInstructionsForSpills.
1991 // Fold the store into the def if possible.
1992 bool Folded = false;
1993 if (CanFold && !Ops.empty()) {
1994 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1997 // Also folded uses, do not issue a load.
1998 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1999 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2001 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2005 // Otherwise tell the spiller to issue a spill.
2007 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2008 bool isKill = LR->end == getStoreIndex(index);
2009 if (!MI->registerDefIsDead(nI.reg))
2010 // No need to spill a dead def.
2011 vrm.addSpillPoint(VReg, isKill, MI);
2013 AddedKill.insert(&nI);
2016 // Update spill slot weight.
2018 SSWeight += getSpillWeight(true, false, loopDepth);
2020 Id = SpillMBBs.find_next(Id);
2024 int Id = RestoreMBBs.find_first();
2026 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2027 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2029 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2030 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2031 int index = restores[i].index;
2034 unsigned VReg = restores[i].vreg;
2035 LiveInterval &nI = getOrCreateInterval(VReg);
2036 bool isReMat = vrm.isReMaterialized(VReg);
2037 MachineInstr *MI = getInstructionFromIndex(index);
2038 bool CanFold = false;
2040 if (restores[i].canFold) {
2042 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2043 MachineOperand &MO = MI->getOperand(j);
2044 if (!MO.isReg() || MO.getReg() != VReg)
2048 // If this restore were to be folded, it would have been folded
2057 // Fold the load into the use if possible.
2058 bool Folded = false;
2059 if (CanFold && !Ops.empty()) {
2061 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2063 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2065 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2066 // If the rematerializable def is a load, also try to fold it.
2067 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2068 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2069 Ops, isLoadSS, LdSlot, VReg);
2070 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2072 // Re-matting an instruction with virtual register use. Add the
2073 // register as an implicit use on the use MI and update the register
2074 // interval's spill weight to HUGE_VALF to prevent it from being
2076 LiveInterval &ImpLi = getInterval(ImpUse);
2077 ImpLi.weight = HUGE_VALF;
2078 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2082 // If folding is not possible / failed, then tell the spiller to issue a
2083 // load / rematerialization for us.
2085 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2087 vrm.addRestorePoint(VReg, MI);
2089 // Update spill slot weight.
2091 SSWeight += getSpillWeight(false, true, loopDepth);
2093 Id = RestoreMBBs.find_next(Id);
2096 // Finalize intervals: add kills, finalize spill weights, and filter out
2098 std::vector<LiveInterval*> RetNewLIs;
2099 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2100 LiveInterval *LI = NewLIs[i];
2102 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2103 if (!AddedKill.count(LI)) {
2104 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2105 unsigned LastUseIdx = getBaseIndex(LR->end);
2106 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2107 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2108 assert(UseIdx != -1);
2109 if (LastUse->getOperand(UseIdx).isImplicit() ||
2110 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2111 LastUse->getOperand(UseIdx).setIsKill();
2112 vrm.addKillPoint(LI->reg, LastUseIdx);
2115 RetNewLIs.push_back(LI);
2119 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2123 /// hasAllocatableSuperReg - Return true if the specified physical register has
2124 /// any super register that's allocatable.
2125 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the null-terminated list of super-registers of Reg.
2126 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
// A super-register qualifies only if it is marked allocatable AND we are
// currently tracking a live interval for it.
2127 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): the return statements (lines 2128-2130) are elided from
// this excerpt; presumably `return true;` here and `return false;` after
// the loop — confirm against the full source.
2132 /// getRepresentativeReg - Find the largest super register of the specified
2133 /// physical register.
2134 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2135 // Find the largest super-register that is allocatable.
// Default to Reg itself in case no suitable super-register is found.
2136 unsigned BestReg = Reg;
2137 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2138 unsigned SuperReg = *AS;
// A candidate is "largest" when it has an interval but no allocatable
// super-register of its own (i.e. nothing above it would be chosen instead).
2139 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): the loop body tail and `return BestReg;` (lines
// 2140-2146) are elided from this excerpt — presumably BestReg is set to
// SuperReg here; confirm against the full source.
2147 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2148 /// specified interval that conflicts with the specified physical register.
2149 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2150                                                   unsigned PhysReg) const {
2151 unsigned NumConflicts = 0;
// Conflicts are measured against the interval of the representative
// (largest) super-register of PhysReg, not PhysReg itself.
2152 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of the virtual register li.reg.
2153 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2154        E = mri_->reg_end(); I != E; ++I) {
2155 MachineOperand &O = I.getOperand();
2156 MachineInstr *MI = O.getParent();
2157 unsigned Index = getInstructionIndex(MI);
// If the physical register's interval is live at this instruction, the
// operand conflicts with it.
2158 if (pli.liveAt(Index))
// NOTE(review): the increment of NumConflicts (line ~2159) is elided
// from this excerpt — confirm against the full source.
2161 return NumConflicts;
2164 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2165 /// around all defs and uses of the specified interval.
2166 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2167                                                   unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) super-register so all sub-registers of
// PhysReg are covered by a single spill.
2168 unsigned SpillReg = getRepresentativeReg(PhysReg);
2170 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2171   // If there are registers which alias PhysReg, but which are not a
2172   // sub-register of the chosen representative super register. Assert
2173   // since we can't handle it yet.
2174   assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2175          tri_->isSuperRegister(*AS, SpillReg));
2177 LiveInterval &pli = getInterval(SpillReg);
// Each instruction must be processed once even if it has multiple operands
// referencing li.reg; SeenMIs dedupes.
2178 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2179 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2180        E = mri_->reg_end(); I != E; ++I) {
2181 MachineOperand &O = I.getOperand();
2182 MachineInstr *MI = O.getParent();
2183 if (SeenMIs.count(MI))
// NOTE(review): the `continue;` / SeenMIs.insert lines (2184-2185) are
// elided from this excerpt — confirm against the full source.
2186 unsigned Index = getInstructionIndex(MI);
// Only spill where the physical register is actually live across this
// instruction.
2187 if (pli.liveAt(Index)) {
// Record an emergency spill point for the spiller and carve this
// instruction's slot range out of the physical register's interval.
2188 vrm.addEmergencySpill(SpillReg, MI);
2189 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// Also clear the same slot range from any live sub-register intervals of
// SpillReg, so they stay consistent with the spill.
2190 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2191 if (!hasInterval(*AS))
// NOTE(review): a `continue;` (line 2192) appears to be elided here.
2193 LiveInterval &spli = getInterval(*AS);
2194 if (spli.liveAt(Index))
2195 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2201 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2202 MachineInstr* startInst) {
2203 LiveInterval& Interval = getOrCreateInterval(reg);
2204 VNInfo* VN = Interval.getNextValue(
2205 getInstructionIndex(startInst) + InstrSlots::DEF,
2206 startInst, getVNInfoAllocator());
2207 VN->hasPHIKill = true;
2208 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2209 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2210 getMBBEndIdx(startInst->getParent()) + 1, VN);
2211 Interval.addRange(LR);