1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// Kill switch for rematerialization (see isReMaterializable below).
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
// Interval-splitting controls; a SplitLimit of -1 means "no limit".
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
// Enables the extended target-independent remat safety analysis in
// isReMaterializable (memoperand and constant-register checks).
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// NOTE(review): EnableFastSpilling is not referenced in this chunk --
// presumably consulted by the spilling code elsewhere in the file; confirm.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass-wide counters reported via -stats.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
58 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
59 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration: makes "-liveintervals" available
// by name to the pass manager.
61 char LiveIntervals::ID = 0;
62 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare which analyses this pass requires and which
// it keeps up to date for later passes.
64 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
// LiveVariables is both consumed (to seed the intervals) and preserved.
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run first: the interval
// construction below assumes PHIs and tied operands are already lowered.
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state between runs of the pass.
81 void LiveIntervals::releaseMemory() {
82 // Free the live intervals themselves.
83 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
84 E = r2iMap_.end(); I != E; ++I)
92 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
93 VNInfoAllocator.Reset();
// Delete instructions that were cloned for this pass but never inserted
// into a basic block; they are owned by ClonedMIs.
94 while (!ClonedMIs.empty()) {
95 MachineInstr *MI = ClonedMIs.back();
97 mf_->DeleteMachineInstr(MI);
// computeNumbering - Assign a dense index to every MachineInstr and
// MachineBasicBlock, then remap every existing live range, VNInfo def,
// and kill index from the old numbering to the new one. Each instruction
// gets InstrSlots::NUM consecutive slots (LOAD/USE/DEF/STORE positions).
101 void LiveIntervals::computeNumbering() {
// Keep the previous index->instruction and index->MBB maps so ranges
// computed under the old numbering can be translated below.
102 Index2MiMap OldI2MI = i2miMap_;
103 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
112 // Number MachineInstrs and MachineBasicBlocks.
113 // Initialize MBB indexes to a sentinel.
114 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
116 unsigned MIIndex = 0;
117 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
119 unsigned StartIdx = MIIndex;
121 // Insert an empty slot at the beginning of each block.
122 MIIndex += InstrSlots::NUM;
123 i2miMap_.push_back(0);
125 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
127 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
128 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If this is a renumbering (not the first numbering), translate every
// live range endpoint from the old index space into the new one.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
// A LOAD offset marks a block-boundary index (live-in), not an
// instruction; remap it via the old index->MBB map.
159 if (offset == InstrSlots::LOAD) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
// Skip over erased instructions to find the next surviving one.
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
// Nothing survives past this point: clamp to one past the last slot.
190 LI->end = InstrSlots::NUM * i2miMap_.size();
// Now remap the value-number metadata (defs and kills) the same way.
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Register allocate the whole function
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache the per-function and per-target objects used by all the helpers.
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
261 numIntervals += getNumIntervals();
263 DOUT << "********** INTERVALS **********\n";
264 for (iterator I = begin(), E = end(); I != E; ++I) {
265 I->second->print(DOUT, tri_);
// Counted again after construction; the delta shows coalescing's effect.
269 numIntervalsAfter += getNumIntervals();
274 /// print - Implement the dump method.
// Dumps every live interval followed by every machine instruction with
// its assigned index, for -print-* style debugging output.
275 void LiveIntervals::print(std::ostream &O, const Module* ) const {
276 O << "********** INTERVALS **********\n";
277 for (const_iterator I = begin(), E = end(); I != E; ++I) {
278 I->second->print(O, tri_);
282 O << "********** MACHINEINSTRS **********\n";
283 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
284 mbbi != mbbe; ++mbbi) {
285 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
286 for (MachineBasicBlock::iterator mii = mbbi->begin(),
287 mie = mbbi->end(); mii != mie; ++mii) {
288 O << getInstructionIndex(mii) << '\t' << *mii;
293 /// conflictsWithPhysRegDef - Returns true if the specified register
294 /// is defined during the duration of the specified interval.
295 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
296 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by each range of the interval.
297 for (LiveInterval::Ranges::const_iterator
298 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
299 for (unsigned index = getBaseIndex(I->start),
300 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
301 index += InstrSlots::NUM) {
302 // skip deleted instructions
303 while (index != end && !getInstructionFromIndex(index))
304 index += InstrSlots::NUM;
305 if (index == end) break;
307 MachineInstr *MI = getInstructionFromIndex(index);
// Copies involving the interval's own register are not real conflicts.
308 unsigned SrcReg, DstReg;
309 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
310 if (SrcReg == li.reg || DstReg == li.reg)
312 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
313 MachineOperand& mop = MI->getOperand(i);
316 unsigned PhysReg = mop.getReg();
317 if (PhysReg == 0 || PhysReg == li.reg)
// A virtual register conflicts through whatever physreg it was
// assigned in the VirtRegMap (if any).
319 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
320 if (!vrm.hasPhys(PhysReg))
322 PhysReg = vrm.getPhys(PhysReg);
324 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Print a register to cerr: physical registers by target
// name, virtual registers as "%reg<N>". Debug aid only.
333 void LiveIntervals::printRegName(unsigned reg) const {
334 if (TargetRegisterInfo::isPhysicalRegister(reg))
335 cerr << tri_->getName(reg);
337 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend (or create) the live interval for a
// virtual register at the definition in instruction 'mi' / operand 'MO'.
// Handles three cases: the first def of the vreg, a two-address redef,
// and a redef introduced by PHI elimination.
340 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
341 MachineBasicBlock::iterator mi,
342 unsigned MIIdx, MachineOperand& MO,
344 LiveInterval &interval) {
345 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
346 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
348 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
349 DOUT << "is a implicit_def\n";
353 // Virtual registers may be defined multiple times (due to phi
354 // elimination and 2-addr elimination). Much of what we do only has to be
355 // done once for the vreg. We use an empty interval to detect the first
356 // time we see a vreg.
357 if (interval.empty()) {
358 // Get the Idx of the defining instructions.
359 unsigned defIndex = getDefIndex(MIIdx);
360 // Earlyclobbers move back one.
361 if (MO.isEarlyClobber())
362 defIndex = getUseIndex(MIIdx);
// Remember the defining copy instruction (if any) so coalescing can
// later identify where this value came from.
364 MachineInstr *CopyMI = NULL;
365 unsigned SrcReg, DstReg;
366 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
367 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
368 tii_->isMoveInstr(*mi, SrcReg, DstReg))
370 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
372 assert(ValNo->id == 0 && "First value in interval is not 0?");
374 // Loop over all of the blocks that the vreg is defined in. There are
375 // two cases we have to handle here. The most common case is a vreg
376 // whose lifetime is contained within a basic block. In this case there
377 // will be a single kill, in MBB, which comes after the definition.
378 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
379 // FIXME: what about dead vars?
381 if (vi.Kills[0] != mi)
382 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
384 killIdx = defIndex+1;
386 // If the kill happens after the definition, we have an intra-block
388 if (killIdx > defIndex) {
389 assert(vi.AliveBlocks.none() &&
390 "Shouldn't be alive across any blocks!");
391 LiveRange LR(defIndex, killIdx, ValNo);
392 interval.addRange(LR);
393 DOUT << " +" << LR << "\n";
394 interval.addKill(ValNo, killIdx);
399 // The other case we handle is when a virtual register lives to the end
400 // of the defining block, potentially live across some blocks, then is
401 // live into some number of blocks, but gets killed. Start by adding a
402 // range that goes from this definition to the end of the defining block.
403 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
404 DOUT << " +" << NewLR;
405 interval.addRange(NewLR);
407 // Iterate over all of the blocks that the variable is completely
408 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
410 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
411 if (vi.AliveBlocks[i]) {
412 LiveRange LR(getMBBStartIdx(i),
413 getMBBEndIdx(i)+1, // MBB ends at -1.
415 interval.addRange(LR);
420 // Finally, this virtual register is live from the start of any killing
421 // block to the 'use' slot of the killing instruction.
422 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
423 MachineInstr *Kill = vi.Kills[i];
424 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
425 LiveRange LR(getMBBStartIdx(Kill->getParent()),
427 interval.addRange(LR);
428 interval.addKill(ValNo, killIdx);
433 // If this is the second time we see a virtual register definition, it
434 // must be due to phi elimination or two addr elimination. If this is
435 // the result of two address elimination, then the vreg is one of the
436 // def-and-use register operand.
437 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
438 // If this is a two-address definition, then we have already processed
439 // the live range. The only problem is that we didn't realize there
440 // are actually two values in the live interval. Because of this we
441 // need to take the LiveRegion that defines this register and split it
443 assert(interval.containsOneValue());
444 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
445 unsigned RedefIndex = getDefIndex(MIIdx);
446 // Earlyclobbers move back one.
447 if (MO.isEarlyClobber())
448 RedefIndex = getUseIndex(MIIdx);
450 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
451 VNInfo *OldValNo = OldLR->valno;
453 // Delete the initial value, which should be short and continuous,
454 // because the 2-addr copy must be in the same MBB as the redef.
455 interval.removeRange(DefIndex, RedefIndex);
457 // Two-address vregs should always only be redefined once. This means
458 // that at this point, there should be exactly one value number in it.
459 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
461 // The new value number (#1) is defined by the instruction we claimed
463 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
466 // Value#0 is now defined by the 2-addr instruction.
467 OldValNo->def = RedefIndex;
470 // Add the new live interval which replaces the range for the input copy.
471 LiveRange LR(DefIndex, RedefIndex, ValNo);
472 DOUT << " replace range with " << LR;
473 interval.addRange(LR);
474 interval.addKill(ValNo, RedefIndex);
476 // If this redefinition is dead, we need to add a dummy unit live
477 // range covering the def slot.
479 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
482 interval.print(DOUT, tri_);
485 // Otherwise, this must be because of phi elimination. If this is the
486 // first redefinition of the vreg that we have seen, go back and change
487 // the live range in the PHI block to be a different value number.
488 if (interval.containsOneValue()) {
489 assert(vi.Kills.size() == 1 &&
490 "PHI elimination vreg should have one kill, the PHI itself!");
492 // Remove the old range that we now know has an incorrect number.
493 VNInfo *VNI = interval.getValNumInfo(0);
494 MachineInstr *Killer = vi.Kills[0];
495 unsigned Start = getMBBStartIdx(Killer->getParent());
496 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
497 DOUT << " Removing [" << Start << "," << End << "] from: ";
498 interval.print(DOUT, tri_); DOUT << "\n";
499 interval.removeRange(Start, End);
500 VNI->hasPHIKill = true;
501 DOUT << " RESULT: "; interval.print(DOUT, tri_);
503 // Replace the interval with one of a NEW value number. Note that this
504 // value number isn't actually defined by an instruction, weird huh? :)
505 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
506 DOUT << " replace range with " << LR;
507 interval.addRange(LR);
508 interval.addKill(LR.valno, End);
509 DOUT << " RESULT: "; interval.print(DOUT, tri_);
512 // In the case of PHI elimination, each variable definition is only
513 // live until the end of the block. We've already taken care of the
514 // rest of the live range.
515 unsigned defIndex = getDefIndex(MIIdx);
516 // Earlyclobbers move back one.
517 if (MO.isEarlyClobber())
518 defIndex = getUseIndex(MIIdx);
521 MachineInstr *CopyMI = NULL;
522 unsigned SrcReg, DstReg;
523 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
524 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
525 tii_->isMoveInstr(*mi, SrcReg, DstReg))
527 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// The value lives only to the end of this block; the rest of its live
// range was constructed when the vreg was first seen.
529 unsigned killIndex = getMBBEndIdx(mbb) + 1;
530 LiveRange LR(defIndex, killIndex, ValNo);
531 interval.addRange(LR);
532 interval.addKill(ValNo, killIndex);
533 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Compute the (block-local) live range for a
// physical register defined at MIIdx, scanning forward to the kill or the
// next redefinition. CopyMI, if non-null, is the copy that defines it.
541 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
542 MachineBasicBlock::iterator mi,
545 LiveInterval &interval,
546 MachineInstr *CopyMI) {
547 // A physical register cannot be live across basic block, so its
548 // lifetime must end somewhere in its defining basic block.
549 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
551 unsigned baseIndex = MIIdx;
552 unsigned start = getDefIndex(baseIndex);
553 // Earlyclobbers move back one.
554 if (MO.isEarlyClobber())
555 start = getUseIndex(MIIdx);
556 unsigned end = start;
558 // If it is not used after definition, it is considered dead at
559 // the instruction defining it. Hence its interval is:
560 // [defSlot(def), defSlot(def)+1)
567 // If it is not dead on definition, it must be killed by a
568 // subsequent instruction. Hence its interval is:
569 // [defSlot(def), useSlot(kill)+1)
570 baseIndex += InstrSlots::NUM;
571 while (++mi != MBB->end()) {
// Skip the empty index slots left after erased instructions.
572 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
573 getInstructionFromIndex(baseIndex) == 0)
574 baseIndex += InstrSlots::NUM;
575 if (mi->killsRegister(interval.reg, tri_)) {
577 end = getUseIndex(baseIndex) + 1;
579 } else if (mi->modifiesRegister(interval.reg, tri_)) {
580 // Another instruction redefines the register before it is ever read.
581 // Then the register is essentially dead at the instruction that defines
582 // it. Hence its interval is:
583 // [defSlot(def), defSlot(def)+1)
589 baseIndex += InstrSlots::NUM;
592 // The only case we should have a dead physreg here without a killing or
593 // instruction where we know it's dead is if it is live-in to the function
595 assert(!CopyMI && "physreg was not killed in defining block!");
599 assert(start < end && "did not find end of interval?");
601 // Already exists? Extend old live interval.
602 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
603 VNInfo *ValNo = (OldLR != interval.end())
604 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
605 LiveRange LR(start, end, ValNo);
606 interval.addRange(LR);
607 interval.addKill(LR.valno, end);
608 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler, and also process sub-registers implicitly
// defined by a physical-register def.
611 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
612 MachineBasicBlock::iterator MI,
616 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
617 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
618 getOrCreateInterval(MO.getReg()));
// Non-allocatable physregs (e.g. reserved registers) are not tracked.
619 else if (allocatableRegs_[MO.getReg()]) {
620 MachineInstr *CopyMI = NULL;
621 unsigned SrcReg, DstReg;
622 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
623 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
624 tii_->isMoveInstr(*MI, SrcReg, DstReg))
626 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
627 getOrCreateInterval(MO.getReg()), CopyMI);
628 // Def of a register also defines its sub-registers.
629 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
630 // If MI also modifies the sub-register explicitly, avoid processing it
631 // more than once. Do not pass in TRI here so it checks for exact match.
632 if (!MI->modifiesRegister(*AS))
633 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
634 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is
// live-in to MBB: from the block start (MIIdx) to its kill, redefinition,
// or through the whole block. 'isAlias' marks ranges created for aliases
// of an explicit live-in.
638 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
640 LiveInterval &interval, bool isAlias) {
641 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
643 // Look for kills, if it reaches a def before it's killed, then it shouldn't
644 // be considered a livein.
645 MachineBasicBlock::iterator mi = MBB->begin();
646 unsigned baseIndex = MIIdx;
647 unsigned start = baseIndex;
// Skip the empty index slots at the start of the block.
648 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
649 getInstructionFromIndex(baseIndex) == 0)
650 baseIndex += InstrSlots::NUM;
651 unsigned end = baseIndex;
653 while (mi != MBB->end()) {
654 if (mi->killsRegister(interval.reg, tri_)) {
656 end = getUseIndex(baseIndex) + 1;
658 } else if (mi->modifiesRegister(interval.reg, tri_)) {
659 // Another instruction redefines the register before it is ever read.
660 // Then the register is essentially dead at the instruction that defines
661 // it. Hence its interval is:
662 // [defSlot(def), defSlot(def)+1)
664 end = getDefIndex(start) + 1;
668 baseIndex += InstrSlots::NUM;
669 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
670 getInstructionFromIndex(baseIndex) == 0)
671 baseIndex += InstrSlots::NUM;
676 // Live-in register might not be used at all.
680 end = getDefIndex(MIIdx) + 1;
682 DOUT << " live through";
// Live-ins have no defining instruction, so the value number gets the
// ~0U sentinel def index.
687 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
688 interval.addRange(LR);
689 interval.addKill(LR.valno, end);
690 DOUT << " +" << LR << '\n';
693 /// computeIntervals - computes the live intervals for virtual
694 /// registers. for some ordering of the machine instructions [1,N] a
695 /// live interval is an interval [i, j) where 1 <= i <= j < N for
696 /// which a variable is live
697 void LiveIntervals::computeIntervals() {
699 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
700 << "********** Function: "
701 << ((Value*)mf_->getFunction())->getName() << '\n';
703 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
705 MachineBasicBlock *MBB = MBBI;
706 // Track the index of the current machine instr.
707 unsigned MIIndex = getMBBStartIdx(MBB);
708 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
710 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
712 // Create intervals for live-ins to this BB first.
713 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
714 LE = MBB->livein_end(); LI != LE; ++LI) {
715 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
716 // Multiple live-ins can alias the same register.
717 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
718 if (!hasInterval(*AS))
719 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
723 // Skip over empty initial indices.
724 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
725 getInstructionFromIndex(MIIndex) == 0)
726 MIIndex += InstrSlots::NUM;
728 for (; MI != miEnd; ++MI) {
729 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit defs are seen before
// explicit ones sharing the same register.
732 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
733 MachineOperand &MO = MI->getOperand(i);
734 // handle register defs - build intervals
735 if (MO.isReg() && MO.getReg() && MO.isDef()) {
736 handleRegisterDef(MBB, MI, MIIndex, MO, i);
740 // Skip over the empty slots after each instruction.
741 unsigned Slots = MI->getDesc().getNumDefs();
744 MIIndex += InstrSlots::NUM * Slots;
746 // Skip over empty indices.
747 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
748 getInstructionFromIndex(MIIndex) == 0)
749 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside [LR.start, LR.end); relies on Idx2MBBMap being sorted.
754 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
755 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
756 std::vector<IdxMBBPair>::const_iterator I =
757 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
760 while (I != Idx2MBBMap.end()) {
761 if (LR.end <= I->first)
763 MBBs.push_back(I->second);
// createInterval - Allocate a fresh LiveInterval for 'reg'; physical
// registers get a different initial spill weight than virtual ones.
770 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
771 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
773 return new LiveInterval(reg, Weight);
776 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
777 /// copy field and returns the source register that defines it.
778 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG's source is operand 1; INSERT_SUBREG's inserted value
// is operand 2.
782 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
783 return VNI->copy->getOperand(1).getReg();
784 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
785 return VNI->copy->getOperand(2).getReg();
786 unsigned SrcReg, DstReg;
787 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
789 assert(0 && "Unrecognized copy instruction!");
793 //===----------------------------------------------------------------------===//
794 // Register allocator hooks.
797 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
798 /// allow one) virtual register operand, then its uses are implicitly using
799 /// the register. Returns the virtual register.
800 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
801 MachineInstr *MI) const {
803 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
804 MachineOperand &MO = MI->getOperand(i);
805 if (!MO.isReg() || !MO.isUse())
// Ignore uses of the interval's own register.
807 unsigned Reg = MO.getReg();
808 if (Reg == 0 || Reg == li.reg)
810 // FIXME: For now, only remat MI with at most one register operand.
812 "Can't rematerialize instruction with multiple register operand!");
821 /// isValNoAvailableAt - Return true if the val# of the specified interval
822 /// which reaches the given instruction also reaches the specified use index.
823 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
824 unsigned UseIdx) const {
825 unsigned Index = getInstructionIndex(MI);
// The value live at MI must be the same value live at UseIdx.
826 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
827 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
828 return UI != li.end() && UI->valno == ValNo;
831 /// isReMaterializable - Returns true if the definition MI of the specified
832 /// val# of the specified interval is re-materializable.
833 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
834 const VNInfo *ValNo, MachineInstr *MI,
835 SmallVectorImpl<LiveInterval*> &SpillIs,
// IMPLICIT_DEF is trivially rematerializable.
840 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
844 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
845 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
846 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
847 // this but remember this is not safe to fold into a two-address
849 // This is a load from fixed stack slot. It can be rematerialized.
852 // If the target-specific rules don't identify an instruction as
853 // being trivially rematerializable, use some target-independent
855 if (!MI->getDesc().isRematerializable() ||
856 !tii_->isTriviallyReMaterializable(MI)) {
857 if (!EnableAggressiveRemat)
860 // If the instruction accesses memory but the memoperands have been lost,
861 // we can't analyze it.
862 const TargetInstrDesc &TID = MI->getDesc();
863 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
866 // Avoid instructions obviously unsafe for remat.
867 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
870 // If the instruction accesses memory and the memory could be non-constant,
871 // assume the instruction is not rematerializable.
872 for (std::list<MachineMemOperand>::const_iterator
873 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
874 const MachineMemOperand &MMO = *I;
875 if (MMO.isVolatile() || MMO.isStore())
877 const Value *V = MMO.getValue();
880 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
881 if (!PSV->isConstant(mf_->getFrameInfo()))
// Ask alias analysis whether an IR-level pointer is constant memory.
883 } else if (!aa_->pointsToConstantMemory(V))
887 // If any of the registers accessed are non-constant, conservatively assume
888 // the instruction is not rematerializable.
890 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
891 const MachineOperand &MO = MI->getOperand(i);
893 unsigned Reg = MO.getReg();
896 if (TargetRegisterInfo::isPhysicalRegister(Reg))
899 // Only allow one def, and that in the first operand.
900 if (MO.isDef() != (i == 0))
903 // Only allow constant-valued registers.
904 bool IsLiveIn = mri_->isLiveIn(Reg);
905 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
908 // For the def, it should be the only def.
909 if (MO.isDef() && (next(I) != E || IsLiveIn))
913 // Only allow one use other register use, as that's all the
914 // remat mechanisms support currently.
918 else if (Reg != ImpUse)
921 // For uses, there should be only one associate def.
922 if (I != E && (next(I) != E || IsLiveIn))
// If the def has an implicitly-used vreg, that vreg's value must be
// available (same val#) at every use point of this interval.
929 unsigned ImpUse = getReMatImplicitUse(li, MI);
931 const LiveInterval &ImpLi = getInterval(ImpUse);
932 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
933 re = mri_->use_end(); ri != re; ++ri) {
934 MachineInstr *UseMI = &*ri;
935 unsigned UseIdx = getInstructionIndex(UseMI);
936 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
938 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
942 // If a register operand of the re-materialized instruction is going to
943 // be spilled next, then it's not legal to re-materialize this instruction.
944 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
945 if (ImpUse == SpillIs[i]->reg)
951 /// isReMaterializable - Returns true if the definition MI of the specified
952 /// val# of the specified interval is re-materializable.
// Convenience overload: no pending-spill list and no load flag out-param.
953 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
954 const VNInfo *ValNo, MachineInstr *MI) {
955 SmallVector<LiveInterval*, 4> Dummy1;
957 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
960 /// isReMaterializable - Returns true if every definition of MI of every
961 /// val# of the specified interval is re-materializable.
962 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
963 SmallVectorImpl<LiveInterval*> &SpillIs,
966 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
968 const VNInfo *VNI = *i;
969 unsigned DefIdx = VNI->def;
971 continue; // Dead val#.
972 // Is the def for the val# rematerializable?
975 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
976 bool DefIsLoad = false;
978 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
985 /// FilterFoldedOps - Filter out two-address use operands. Return
986 /// true if it finds any issue with the operands that ought to prevent
988 static bool FilterFoldedOps(MachineInstr *MI,
989 SmallVector<unsigned, 2> &Ops,
991 SmallVector<unsigned, 2> &FoldOps) {
992 const TargetInstrDesc &TID = MI->getDesc();
995 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
996 unsigned OpIdx = Ops[i];
997 MachineOperand &MO = MI->getOperand(OpIdx);
998 // FIXME: fold subreg use.
// Accumulate whether the folded access modifies and/or reads the reg.
1002 MRInfo |= (unsigned)VirtRegMap::isMod;
1004 // Filter out two-address use operand(s).
1005 if (!MO.isImplicit() &&
1006 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// A tied use is both read and written; don't add it to FoldOps.
1007 MRInfo = VirtRegMap::isModRef;
1010 MRInfo |= (unsigned)VirtRegMap::isRef;
1012 FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  // Ask the target to fold either the stack slot (isSS) or the
  // rematerialized def instruction (DefMI) into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);

    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer any pending spill / restore / emergency-spill points from
    // the original instruction to the folded replacement.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Update both index maps and splice fmi in place of the old MI.
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))

  // Let the target make the final determination for the surviving operands.
  return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Returns true if the live interval is entirely
/// contained within a single basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  // Collect the block containing each live range; fail as soon as a
  // second block is seen or a range crosses a block boundary.
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
    I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Find the first (index, MBB) pair whose index is not less than the
    // range start.
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
    if (I->end > II->first)  // crossing a MBB.
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    // Only rematerialized virtual registers are of interest here.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // Redirect the remat'ed instruction's use of li.reg to NewVReg.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
  // Walk every operand of MI looking for references to li.reg.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))

    // Default behavior: attempt a stack-slot fold unless the def is
    // rematerializable (then folding decisions are made below).
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
        // Try fold loads (from stack slot, constant pool, etc.) into uses.

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    // Collect all other operands of MI that also reference li.reg.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
      HasUse |= MOj.isUse();
      HasDef |= MOj.isDef();

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);

      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          if (isRemoved(MI)) {
          goto RestartInstruction;
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
      NewVReg = mri_->createVirtualRegister(rc);
      CreatedNewVReg = true;
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);

    if (CreatedNewVReg) {
        // Record that NewVReg can be rematerialized from ReMatDefMI so the
        // spiller can remat rather than reload.
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
        vrm.setIsSplitFromReg(NewVReg, li.reg);

      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the given val# has a kill within
/// MBB at an index strictly after Idx (i.e. the value is still used later
/// in this block).
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                  MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  // Look for any recorded kill index strictly inside (Idx, End).
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  // Index is the numbering of MI; HasUse / HasDef record how the register
  // being spilled appears in that instruction's operand.
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
/// RewriteInfoCompare - Orders RewriteInfo records by ascending
/// instruction index, for use with std::sort.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg within
/// the given live range, recording per-MBB spill and restore points.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Live range bounds rounded to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
      // Reuse the vreg previously created for this block, if any.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
          // It's better to start a new interval to avoid artificially
          // extending the new interval.
          if (MIHasDef && !MIHasUse) {
            MBBVRegsMap.erase(MBB->getNumber());

    bool IsNew = ThisVReg == 0;
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Keep track of the last def and first use in each MBB.
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.canFold = !HasUse;
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);

        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (SII != SpillIdxes.end() &&
            SII->second.back().vreg == NewVReg &&
            (int)index > SII->second.back().index)
          // Use(s) following the last def, it's not safe to fold the spill.
          SII->second.back().canFold = false;
        DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
          RestoreIdxes.find(MBBId);
        if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
          // If we are splitting live intervals, only fold if it's the first
          // use and there isn't another use later in the MBB.
          RII->second.back().canFold = false;
          // Only need a reload if there isn't an earlier def / use.
          if (RII == RestoreIdxes.end()) {
            std::vector<SRInfo> Infos;
            Infos.push_back(SRInfo(index, NewVReg, true));
            RestoreIdxes.insert(std::make_pair(MBBId, Infos));
            RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if the restore recorded for virtual
/// register vr at instruction index 'index' in block Id is still marked
/// as foldable.
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
  // Scan this block's restore records for a foldable entry matching both
  // the instruction index and the register.
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
1605 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1606 BitVector &RestoreMBBs,
1607 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1608 if (!RestoreMBBs[Id])
1610 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1611 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1612 if (Restores[i].index == index && Restores[i].vreg)
1613 Restores[i].index = -1;
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk every remaining reference to li.reg in the function.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
      // Any surviving def must be the IMPLICIT_DEF itself; delete it.
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite all of MI's operands that reference li.reg to the new vreg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
  // Orders intervals by ascending start index so callers receive the
  // newly created intervals sorted.
  bool operator()(LiveInterval* A, LiveInterval* B) {
    return A->beginNumber() < B->beginNumber();
/// addIntervalsForSpillsFast - Simple spilling: assign one stack slot to
/// the whole interval and rewrite each referencing instruction with a new
/// short-lived vreg (reload before a use, store after a def).
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Gather the operand indices of MI that reference li.reg.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);

    // Try folding the memory reference first; only fall back to an
    // explicit reload / spill sequence if folding fails.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        vrm.addRestorePoint(NewVReg, MI);
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        vrm.addSpillPoint(NewVReg, true, MI);

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";

    // Account for the reload and/or spill in the stack slot weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      SSWeight += getSpillWeight(true, true, loopDepth);
      SSWeight += getSpillWeight(false, true, loopDepth);
      SSWeight += getSpillWeight(true, false, loopDepth);

    RI = mri_->reg_begin(li.reg);

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());
/// addIntervalsForSpills - Spill the given interval, possibly splitting it
/// at basic block boundaries, and return the new intervals created.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);

  // Spill slot weight.

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      IsFirstRange = false;

    SSWeight = 0.0f; // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  // Only split intervals that span more than one basic block, and honor
  // the -split-limit debugging cap.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)

  // First pass: decide, per val#, whether the def can be rematerialized
  // (and possibly deleted) and whether a stack slot is needed at all.
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs, SSWeight);

  // Insert spills / restores if we are splitting.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk the blocks that were marked as needing a spill (store).
    int Id = SpillMBBs.find_first();
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.

        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
              nI.removeRange(getDefIndex(index), getStoreIndex(index));

        // Otherwise tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
            AddedKill.insert(&nI);

        // Update spill slot weight.
          SSWeight += getSpillWeight(true, false, loopDepth);
      Id = SpillMBBs.find_next(Id);

  // Walk the blocks that were marked as needing a restore (reload).
  int Id = RestoreMBBs.find_first();
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      if (restores[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
          // If this restore were to be folded, it would have been folded

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
        SSWeight += getSpillWeight(false, true, loopDepth);
    Id = RestoreMBBs.find_next(Id);

  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      // Normalize the accumulated weight by the interval's approximate size.
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2091 /// hasAllocatableSuperReg - Return true if the specified physical register has
2092 /// any super register that's allocatable.
2093 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2094 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2095 if (allocatableRegs_[*AS] && hasInterval(*AS))
2100 /// getRepresentativeReg - Find the largest super register of the specified
2101 /// physical register.
2102 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2103 // Find the largest super-register that is allocatable.
2104 unsigned BestReg = Reg;
2105 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2106 unsigned SuperReg = *AS;
2107 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2115 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2116 /// specified interval that conflicts with the specified physical register.
2117 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2118 unsigned PhysReg) const {
2119 unsigned NumConflicts = 0;
2120 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2121 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2122 E = mri_->reg_end(); I != E; ++I) {
2123 MachineOperand &O = I.getOperand();
2124 MachineInstr *MI = O.getParent();
2125 unsigned Index = getInstructionIndex(MI);
2126 if (pli.liveAt(Index))
2129 return NumConflicts;
2132 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2133 /// around all defs and uses of the specified interval.
2134 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2135 unsigned PhysReg, VirtRegMap &vrm) {
2136 unsigned SpillReg = getRepresentativeReg(PhysReg);
2138 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2139 // If there are registers which alias PhysReg, but which are not a
2140 // sub-register of the chosen representative super register. Assert
2141 // since we can't handle it yet.
2142 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2143 tri_->isSuperRegister(*AS, SpillReg));
2145 LiveInterval &pli = getInterval(SpillReg);
2146 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2147 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2148 E = mri_->reg_end(); I != E; ++I) {
2149 MachineOperand &O = I.getOperand();
2150 MachineInstr *MI = O.getParent();
2151 if (SeenMIs.count(MI))
2154 unsigned Index = getInstructionIndex(MI);
2155 if (pli.liveAt(Index)) {
2156 vrm.addEmergencySpill(SpillReg, MI);
2157 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2158 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2159 if (!hasInterval(*AS))
2161 LiveInterval &spli = getInterval(*AS);
2162 if (spli.liveAt(Index))
2163 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2169 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2170 MachineInstr* startInst) {
2171 LiveInterval& Interval = getOrCreateInterval(reg);
2172 VNInfo* VN = Interval.getNextValue(
2173 getInstructionIndex(startInst) + InstrSlots::DEF,
2174 startInst, getVNInfoAllocator());
2175 VN->hasPHIKill = true;
2176 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2177 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2178 getMBBEndIdx(startInst->getParent()) + 1, VN);
2179 Interval.addRange(LR);