1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds, "Number of loads/stores folded into instructions");
STATISTIC(numSplits, "Number of intervals split");
61 char LiveIntervals::ID = 0;
62 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
64 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 MachineFunctionPass::getAnalysisUsage(AU);
81 void LiveIntervals::releaseMemory() {
82 // Free the live intervals themselves.
83 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
84 E = r2iMap_.end(); I != E; ++I)
// Release VNInfo memory regions after all VNInfo objects are dtor'd.
93 VNInfoAllocator.Reset();
94 while (!ClonedMIs.empty()) {
95 MachineInstr *MI = ClonedMIs.back();
97 mf_->DeleteMachineInstr(MI);
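/// computeNumbering - Number all MachineInstrs and MachineBasicBlocks and,
/// if an old numbering exists, remap every live range, value number def,
/// and kill index onto the new numbering.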
101 void LiveIntervals::computeNumbering() {
102 Index2MiMap OldI2MI = i2miMap_;
103 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
112 // Number MachineInstrs and MachineBasicBlocks.
// Initialize MBB indexes to a sentinel.
114 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
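// Each instruction is assigned InstrSlots::NUM consecutive index values,
// one per sub-instruction slot (LOAD / USE / DEF / STORE in the layout
// assumed from LiveIntervalAnalysis.h), so live ranges can begin and end
// at precise points within an instruction.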
116 unsigned MIIndex = 0;
117 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
119 unsigned StartIdx = MIIndex;
121 // Insert an empty slot at the beginning of each block.
122 MIIndex += InstrSlots::NUM;
123 i2miMap_.push_back(0);
125 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
127 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
128 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
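// (If the instruction that owned the old end index has been erased, the
// scan below advances to the next surviving instruction; if none remains,
// the end is pushed out to one past the last index in the new numbering.)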
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
// Remap the VNInfo kill indices, which work the same way as
// the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
/// runOnMachineFunction - Compute live intervals for the whole function.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
261 numIntervals += getNumIntervals();
263 DOUT << "********** INTERVALS **********\n";
264 for (iterator I = begin(), E = end(); I != E; ++I) {
265 I->second->print(DOUT, tri_);
269 numIntervalsAfter += getNumIntervals();
274 /// print - Implement the dump method.
275 void LiveIntervals::print(std::ostream &O, const Module* ) const {
276 O << "********** INTERVALS **********\n";
277 for (const_iterator I = begin(), E = end(); I != E; ++I) {
278 I->second->print(O, tri_);
282 O << "********** MACHINEINSTRS **********\n";
283 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
284 mbbi != mbbe; ++mbbi) {
285 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
286 for (MachineBasicBlock::iterator mii = mbbi->begin(),
287 mie = mbbi->end(); mii != mie; ++mii) {
288 O << getInstructionIndex(mii) << '\t' << *mii;
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the lifetime of the specified interval.
295 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
296 VirtRegMap &vrm, unsigned reg) {
297 for (LiveInterval::Ranges::const_iterator
298 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
299 for (unsigned index = getBaseIndex(I->start),
300 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
301 index += InstrSlots::NUM) {
302 // skip deleted instructions
303 while (index != end && !getInstructionFromIndex(index))
304 index += InstrSlots::NUM;
305 if (index == end) break;
307 MachineInstr *MI = getInstructionFromIndex(index);
308 unsigned SrcReg, DstReg;
309 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
310 if (SrcReg == li.reg || DstReg == li.reg)
312 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
313 MachineOperand& mop = MI->getOperand(i);
316 unsigned PhysReg = mop.getReg();
317 if (PhysReg == 0 || PhysReg == li.reg)
319 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
320 if (!vrm.hasPhys(PhysReg))
322 PhysReg = vrm.getPhys(PhysReg);
324 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
333 void LiveIntervals::printRegName(unsigned reg) const {
334 if (TargetRegisterInfo::isPhysicalRegister(reg))
335 cerr << tri_->getName(reg);
337 cerr << "%reg" << reg;
340 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
341 MachineBasicBlock::iterator mi,
342 unsigned MIIdx, MachineOperand& MO,
344 LiveInterval &interval) {
345 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
346 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
348 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
349 DOUT << "is a implicit_def\n";
353 // Virtual registers may be defined multiple times (due to phi
354 // elimination and 2-addr elimination). Much of what we do only has to be
355 // done once for the vreg. We use an empty interval to detect the first
356 // time we see a vreg.
357 if (interval.empty()) {
// Get the Idx of the defining instruction.
359 unsigned defIndex = getDefIndex(MIIdx);
360 // Earlyclobbers move back one.
361 if (MO.isEarlyClobber())
362 defIndex = getUseIndex(MIIdx);
364 MachineInstr *CopyMI = NULL;
365 unsigned SrcReg, DstReg;
366 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
367 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
368 tii_->isMoveInstr(*mi, SrcReg, DstReg))
370 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
372 assert(ValNo->id == 0 && "First value in interval is not 0?");
374 // Loop over all of the blocks that the vreg is defined in. There are
375 // two cases we have to handle here. The most common case is a vreg
376 // whose lifetime is contained within a basic block. In this case there
377 // will be a single kill, in MBB, which comes after the definition.
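// For example, assuming the usual four-slot LOAD/USE/DEF/STORE numbering,
// a def at instruction index 12 that is killed by the instruction at index
// 16 produces the single range [14, 18): the def slot of the def up to one
// past the use slot of the kill.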
378 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
379 // FIXME: what about dead vars?
381 if (vi.Kills[0] != mi)
382 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
384 killIdx = defIndex+1;
// If the kill happens after the definition, we have an intra-block
// live range.
388 if (killIdx > defIndex) {
389 assert(vi.AliveBlocks.none() &&
390 "Shouldn't be alive across any blocks!");
391 LiveRange LR(defIndex, killIdx, ValNo);
392 interval.addRange(LR);
393 DOUT << " +" << LR << "\n";
394 interval.addKill(ValNo, killIdx);
399 // The other case we handle is when a virtual register lives to the end
400 // of the defining block, potentially live across some blocks, then is
401 // live into some number of blocks, but gets killed. Start by adding a
402 // range that goes from this definition to the end of the defining block.
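// For example, a vreg defined in BB0, live through BB1, and killed in BB2
// ends up with three pieces: [defSlot, end(BB0)+1), all of BB1, and
// [start(BB2), useSlot(kill)+1).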
403 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
404 DOUT << " +" << NewLR;
405 interval.addRange(NewLR);
// Iterate over all of the blocks that the variable is completely
// live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
410 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
411 if (vi.AliveBlocks[i]) {
412 LiveRange LR(getMBBStartIdx(i),
413 getMBBEndIdx(i)+1, // MBB ends at -1.
415 interval.addRange(LR);
420 // Finally, this virtual register is live from the start of any killing
421 // block to the 'use' slot of the killing instruction.
422 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
423 MachineInstr *Kill = vi.Kills[i];
424 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
425 LiveRange LR(getMBBStartIdx(Kill->getParent()),
427 interval.addRange(LR);
428 interval.addKill(ValNo, killIdx);
// If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two addr elimination. If this is
// the result of two address elimination, then the vreg is one of the
// def-and-use register operands.
437 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
// If this is a two-address definition, then we have already processed
// the live range. The only problem is that we didn't realize there
// are actually two values in the live interval. Because of this we
// need to take the live range that defines this register and split it
// into two values.
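// After the code below, the original value #0 is redefined by the
// two-address instruction at RedefIndex, while a new value #1 takes over
// the old range [DefIndex, RedefIndex) that feeds the tied operand.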
443 assert(interval.containsOneValue());
444 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
445 unsigned RedefIndex = getDefIndex(MIIdx);
446 // Earlyclobbers move back one.
447 if (MO.isEarlyClobber())
448 RedefIndex = getUseIndex(MIIdx);
450 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
451 VNInfo *OldValNo = OldLR->valno;
453 // Delete the initial value, which should be short and continuous,
454 // because the 2-addr copy must be in the same MBB as the redef.
455 interval.removeRange(DefIndex, RedefIndex);
457 // Two-address vregs should always only be redefined once. This means
458 // that at this point, there should be exactly one value number in it.
459 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
463 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
466 // Value#0 is now defined by the 2-addr instruction.
467 OldValNo->def = RedefIndex;
470 // Add the new live interval which replaces the range for the input copy.
471 LiveRange LR(DefIndex, RedefIndex, ValNo);
472 DOUT << " replace range with " << LR;
473 interval.addRange(LR);
474 interval.addKill(ValNo, RedefIndex);
476 // If this redefinition is dead, we need to add a dummy unit live
477 // range covering the def slot.
479 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
482 interval.print(DOUT, tri_);
485 // Otherwise, this must be because of phi elimination. If this is the
486 // first redefinition of the vreg that we have seen, go back and change
487 // the live range in the PHI block to be a different value number.
488 if (interval.containsOneValue()) {
489 assert(vi.Kills.size() == 1 &&
490 "PHI elimination vreg should have one kill, the PHI itself!");
492 // Remove the old range that we now know has an incorrect number.
493 VNInfo *VNI = interval.getValNumInfo(0);
494 MachineInstr *Killer = vi.Kills[0];
495 unsigned Start = getMBBStartIdx(Killer->getParent());
496 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
497 DOUT << " Removing [" << Start << "," << End << "] from: ";
498 interval.print(DOUT, tri_); DOUT << "\n";
499 interval.removeRange(Start, End);
500 VNI->hasPHIKill = true;
501 DOUT << " RESULT: "; interval.print(DOUT, tri_);
503 // Replace the interval with one of a NEW value number. Note that this
504 // value number isn't actually defined by an instruction, weird huh? :)
505 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
506 DOUT << " replace range with " << LR;
507 interval.addRange(LR);
508 interval.addKill(LR.valno, End);
509 DOUT << " RESULT: "; interval.print(DOUT, tri_);
512 // In the case of PHI elimination, each variable definition is only
513 // live until the end of the block. We've already taken care of the
514 // rest of the live range.
515 unsigned defIndex = getDefIndex(MIIdx);
516 // Earlyclobbers move back one.
517 if (MO.isEarlyClobber())
518 defIndex = getUseIndex(MIIdx);
521 MachineInstr *CopyMI = NULL;
522 unsigned SrcReg, DstReg;
523 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
524 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
525 tii_->isMoveInstr(*mi, SrcReg, DstReg))
527 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
529 unsigned killIndex = getMBBEndIdx(mbb) + 1;
530 LiveRange LR(defIndex, killIndex, ValNo);
531 interval.addRange(LR);
532 interval.addKill(ValNo, killIndex);
533 ValNo->hasPHIKill = true;
541 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
542 MachineBasicBlock::iterator mi,
545 LiveInterval &interval,
546 MachineInstr *CopyMI) {
// A physical register cannot be live across basic blocks, so its
// lifetime must end somewhere in its defining basic block.
549 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
551 unsigned baseIndex = MIIdx;
552 unsigned start = getDefIndex(baseIndex);
553 // Earlyclobbers move back one.
554 if (MO.isEarlyClobber())
555 start = getUseIndex(MIIdx);
556 unsigned end = start;
558 // If it is not used after definition, it is considered dead at
559 // the instruction defining it. Hence its interval is:
560 // [defSlot(def), defSlot(def)+1)
567 // If it is not dead on definition, it must be killed by a
568 // subsequent instruction. Hence its interval is:
569 // [defSlot(def), useSlot(kill)+1)
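// For example, a def at base index 12 that is killed by the instruction at
// base index 20 yields the range [14, 22), assuming the usual DEF=+2 and
// USE=+1 slot offsets.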
570 baseIndex += InstrSlots::NUM;
571 while (++mi != MBB->end()) {
572 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
573 getInstructionFromIndex(baseIndex) == 0)
574 baseIndex += InstrSlots::NUM;
575 if (mi->killsRegister(interval.reg, tri_)) {
577 end = getUseIndex(baseIndex) + 1;
579 } else if (mi->modifiesRegister(interval.reg, tri_)) {
580 // Another instruction redefines the register before it is ever read.
581 // Then the register is essentially dead at the instruction that defines
582 // it. Hence its interval is:
583 // [defSlot(def), defSlot(def)+1)
589 baseIndex += InstrSlots::NUM;
// The only case where we should have a dead physreg here, without a killing
// or other instruction telling us it is dead, is if it is live-in to the
// function and never used.
595 assert(!CopyMI && "physreg was not killed in defining block!");
599 assert(start < end && "did not find end of interval?");
601 // Already exists? Extend old live interval.
602 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
603 VNInfo *ValNo = (OldLR != interval.end())
604 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
605 LiveRange LR(start, end, ValNo);
606 interval.addRange(LR);
607 interval.addKill(LR.valno, end);
608 DOUT << " +" << LR << '\n';
611 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
612 MachineBasicBlock::iterator MI,
616 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
617 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
618 getOrCreateInterval(MO.getReg()));
619 else if (allocatableRegs_[MO.getReg()]) {
620 MachineInstr *CopyMI = NULL;
621 unsigned SrcReg, DstReg;
622 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
623 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
624 tii_->isMoveInstr(*MI, SrcReg, DstReg))
626 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
627 getOrCreateInterval(MO.getReg()), CopyMI);
628 // Def of a register also defines its sub-registers.
629 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
630 // If MI also modifies the sub-register explicitly, avoid processing it
631 // more than once. Do not pass in TRI here so it checks for exact match.
632 if (!MI->modifiesRegister(*AS))
633 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
634 getOrCreateInterval(*AS), 0);
638 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
640 LiveInterval &interval, bool isAlias) {
641 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
// Look for kills; if the register reaches a def before it is killed, then it
// shouldn't be considered a live-in.
645 MachineBasicBlock::iterator mi = MBB->begin();
646 unsigned baseIndex = MIIdx;
647 unsigned start = baseIndex;
648 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
649 getInstructionFromIndex(baseIndex) == 0)
650 baseIndex += InstrSlots::NUM;
651 unsigned end = baseIndex;
653 while (mi != MBB->end()) {
654 if (mi->killsRegister(interval.reg, tri_)) {
656 end = getUseIndex(baseIndex) + 1;
658 } else if (mi->modifiesRegister(interval.reg, tri_)) {
659 // Another instruction redefines the register before it is ever read.
660 // Then the register is essentially dead at the instruction that defines
661 // it. Hence its interval is:
662 // [defSlot(def), defSlot(def)+1)
664 end = getDefIndex(start) + 1;
668 baseIndex += InstrSlots::NUM;
669 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
670 getInstructionFromIndex(baseIndex) == 0)
671 baseIndex += InstrSlots::NUM;
676 // Live-in register might not be used at all.
680 end = getDefIndex(MIIdx) + 1;
682 DOUT << " live through";
687 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
688 interval.addRange(LR);
689 interval.addKill(LR.valno, end);
690 DOUT << " +" << LR << '\n';
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
697 void LiveIntervals::computeIntervals() {
699 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
700 << "********** Function: "
701 << ((Value*)mf_->getFunction())->getName() << '\n';
703 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
705 MachineBasicBlock *MBB = MBBI;
706 // Track the index of the current machine instr.
707 unsigned MIIndex = getMBBStartIdx(MBB);
708 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
710 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
712 // Create intervals for live-ins to this BB first.
713 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
714 LE = MBB->livein_end(); LI != LE; ++LI) {
715 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
716 // Multiple live-ins can alias the same register.
717 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
718 if (!hasInterval(*AS))
719 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
723 // Skip over empty initial indices.
724 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
725 getInstructionFromIndex(MIIndex) == 0)
726 MIIndex += InstrSlots::NUM;
728 for (; MI != miEnd; ++MI) {
729 DOUT << MIIndex << "\t" << *MI;
732 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
733 MachineOperand &MO = MI->getOperand(i);
734 // handle register defs - build intervals
735 if (MO.isReg() && MO.getReg() && MO.isDef()) {
736 handleRegisterDef(MBB, MI, MIIndex, MO, i);
740 // Skip over the empty slots after each instruction.
741 unsigned Slots = MI->getDesc().getNumDefs();
744 MIIndex += InstrSlots::NUM * Slots;
746 // Skip over empty indices.
747 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
748 getInstructionFromIndex(MIIndex) == 0)
749 MIIndex += InstrSlots::NUM;
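/// findLiveInMBBs - Append to MBBs every basic block whose start index falls
/// inside [LR.start, LR.end), i.e. the blocks the live range is live into.
/// Returns true if any such block was found.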
754 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
755 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
756 std::vector<IdxMBBPair>::const_iterator I =
757 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
760 while (I != Idx2MBBMap.end()) {
761 if (LR.end <= I->first)
763 MBBs.push_back(I->second);
770 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
771 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
773 return new LiveInterval(reg, Weight);
776 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
777 /// copy field and returns the source register that defines it.
778 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
782 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
783 return VNI->copy->getOperand(1).getReg();
784 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
785 return VNI->copy->getOperand(2).getReg();
786 unsigned SrcReg, DstReg;
787 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
789 assert(0 && "Unrecognized copy instruction!");
793 //===----------------------------------------------------------------------===//
794 // Register allocator hooks.
797 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
798 /// allow one) virtual register operand, then its uses are implicitly using
799 /// the register. Returns the virtual register.
800 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
801 MachineInstr *MI) const {
803 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
804 MachineOperand &MO = MI->getOperand(i);
805 if (!MO.isReg() || !MO.isUse())
807 unsigned Reg = MO.getReg();
808 if (Reg == 0 || Reg == li.reg)
810 // FIXME: For now, only remat MI with at most one register operand.
812 "Can't rematerialize instruction with multiple register operand!");
821 /// isValNoAvailableAt - Return true if the val# of the specified interval
822 /// which reaches the given instruction also reaches the specified use index.
823 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
824 unsigned UseIdx) const {
825 unsigned Index = getInstructionIndex(MI);
826 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
827 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
828 return UI != li.end() && UI->valno == ValNo;
831 /// isReMaterializable - Returns true if the definition MI of the specified
832 /// val# of the specified interval is re-materializable.
833 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
834 const VNInfo *ValNo, MachineInstr *MI,
835 SmallVectorImpl<LiveInterval*> &SpillIs,
840 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
844 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
845 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
// FIXME: Let the target-specific isReallyTriviallyReMaterializable hook
// determine this, but remember it is not safe to fold into a two-address
// instruction.
849 // This is a load from fixed stack slot. It can be rematerialized.
// If the target-specific rules don't identify an instruction as being
// trivially rematerializable, fall back to the target-independent checks
// below (only when -aggressive-remat is enabled).
855 if (!MI->getDesc().isRematerializable() ||
856 !tii_->isTriviallyReMaterializable(MI)) {
857 if (!EnableAggressiveRemat)
860 // If the instruction accesses memory but the memoperands have been lost,
861 // we can't analyze it.
862 const TargetInstrDesc &TID = MI->getDesc();
863 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
866 // Avoid instructions obviously unsafe for remat.
867 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
870 // If the instruction accesses memory and the memory could be non-constant,
871 // assume the instruction is not rematerializable.
872 for (std::list<MachineMemOperand>::const_iterator
873 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
874 const MachineMemOperand &MMO = *I;
875 if (MMO.isVolatile() || MMO.isStore())
877 const Value *V = MMO.getValue();
880 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
881 if (!PSV->isConstant(mf_->getFrameInfo()))
883 } else if (!aa_->pointsToConstantMemory(V))
887 // If any of the registers accessed are non-constant, conservatively assume
888 // the instruction is not rematerializable.
890 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
891 const MachineOperand &MO = MI->getOperand(i);
893 unsigned Reg = MO.getReg();
896 if (TargetRegisterInfo::isPhysicalRegister(Reg))
899 // Only allow one def, and that in the first operand.
900 if (MO.isDef() != (i == 0))
903 // Only allow constant-valued registers.
904 bool IsLiveIn = mri_->isLiveIn(Reg);
905 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
908 // For the def, it should be the only def.
909 if (MO.isDef() && (next(I) != E || IsLiveIn))
// Only allow one other register use, as that's all the
// remat mechanisms currently support.
918 else if (Reg != ImpUse)
// For uses, there should be only one associated def.
922 if (I != E && (next(I) != E || IsLiveIn))
929 unsigned ImpUse = getReMatImplicitUse(li, MI);
931 const LiveInterval &ImpLi = getInterval(ImpUse);
932 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
933 re = mri_->use_end(); ri != re; ++ri) {
934 MachineInstr *UseMI = &*ri;
935 unsigned UseIdx = getInstructionIndex(UseMI);
936 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
938 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
942 // If a register operand of the re-materialized instruction is going to
943 // be spilled next, then it's not legal to re-materialize this instruction.
944 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
945 if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
953 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
954 SmallVectorImpl<LiveInterval*> &SpillIs,
957 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
959 const VNInfo *VNI = *i;
960 unsigned DefIdx = VNI->def;
962 continue; // Dead val#.
963 // Is the def for the val# rematerializable?
966 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
967 bool DefIsLoad = false;
969 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
976 /// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
979 static bool FilterFoldedOps(MachineInstr *MI,
980 SmallVector<unsigned, 2> &Ops,
982 SmallVector<unsigned, 2> &FoldOps) {
983 const TargetInstrDesc &TID = MI->getDesc();
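// Classify each candidate operand: defs contribute VirtRegMap::isMod,
// ordinary uses contribute VirtRegMap::isRef and are kept in FoldOps, and
// non-implicit uses tied to a def (two-address operands) force isModRef
// and are filtered out.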
986 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
987 unsigned OpIdx = Ops[i];
988 MachineOperand &MO = MI->getOperand(OpIdx);
989 // FIXME: fold subreg use.
993 MRInfo |= (unsigned)VirtRegMap::isMod;
995 // Filter out two-address use operand(s).
996 if (!MO.isImplicit() &&
997 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
998 MRInfo = VirtRegMap::isModRef;
1001 MRInfo |= (unsigned)VirtRegMap::isRef;
1003 FoldOps.push_back(OpIdx);
1009 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1010 /// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
1013 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1014 VirtRegMap &vrm, MachineInstr *DefMI,
1016 SmallVector<unsigned, 2> &Ops,
1017 bool isSS, int Slot, unsigned Reg) {
1018 // If it is an implicit def instruction, just delete it.
1019 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1020 RemoveMachineInstrFromMaps(MI);
1021 vrm.RemoveMachineInstrFromMaps(MI);
1022 MI->eraseFromParent();
1027 // Filter the list of operand indexes that are to be folded. Abort if
1028 // any operand will prevent folding.
1029 unsigned MRInfo = 0;
1030 SmallVector<unsigned, 2> FoldOps;
1031 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
// The only time it's safe to fold into a two-address instruction is when
// we are folding a reload / spill from / into a spill stack slot.
1036 if (DefMI && (MRInfo & VirtRegMap::isMod))
1039 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1040 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1042 // Remember this instruction uses the spill slot.
1043 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1045 // Attempt to fold the memory reference into the instruction. If
1046 // we can do this, we don't need to insert spill code.
1047 MachineBasicBlock &MBB = *MI->getParent();
1048 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1049 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1050 vrm.transferSpillPts(MI, fmi);
1051 vrm.transferRestorePts(MI, fmi);
1052 vrm.transferEmergencySpills(MI, fmi);
1054 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1055 mi2iMap_[fmi] = InstrIdx;
1056 MI = MBB.insert(MBB.erase(MI), fmi);
1063 /// canFoldMemoryOperand - Returns true if the specified load / store
1064 /// folding is possible.
1065 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1066 SmallVector<unsigned, 2> &Ops,
1068 // Filter the list of operand indexes that are to be folded. Abort if
1069 // any operand will prevent folding.
1070 unsigned MRInfo = 0;
1071 SmallVector<unsigned, 2> FoldOps;
1072 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1075 // It's only legal to remat for a use, not a def.
1076 if (ReMat && (MRInfo & VirtRegMap::isMod))
1079 return tii_->canFoldMemoryOperand(MI, FoldOps);
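/// intervalIsInOneMBB - Returns true if every range of the given live
/// interval is contained within a single basic block.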
1082 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1083 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1084 for (LiveInterval::Ranges::const_iterator
1085 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1086 std::vector<IdxMBBPair>::const_iterator II =
1087 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1088 if (II == Idx2MBBMap.end())
1090 if (I->end > II->first) // crossing a MBB.
1092 MBBs.insert(II->second);
1093 if (MBBs.size() > 1)
1099 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1100 /// interval on to-be re-materialized operands of MI) with new register.
1101 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1102 MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
1107 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1108 MachineOperand &MO = MI->getOperand(i);
1111 unsigned Reg = MO.getReg();
1112 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1114 if (!vrm.isReMaterialized(Reg))
1116 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1117 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1119 UseMO->setReg(NewVReg);
1123 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1124 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1125 bool LiveIntervals::
1126 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1127 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1128 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1129 unsigned Slot, int LdSlot,
1130 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1132 const TargetRegisterClass* rc,
1133 SmallVector<int, 4> &ReMatIds,
1134 const MachineLoopInfo *loopInfo,
1135 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1136 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1137 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1138 MachineBasicBlock *MBB = MI->getParent();
1139 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1140 bool CanFold = false;
1142 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1143 MachineOperand& mop = MI->getOperand(i);
1146 unsigned Reg = mop.getReg();
1147 unsigned RegI = Reg;
1148 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1153 bool TryFold = !DefIsReMat;
1154 bool FoldSS = true; // Default behavior unless it's a remat.
1155 int FoldSlot = Slot;
1157 // If this is the rematerializable definition MI itself and
1158 // all of its uses are rematerialized, simply delete it.
1159 if (MI == ReMatOrigDefMI && CanDelete) {
1160 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1162 RemoveMachineInstrFromMaps(MI);
1163 vrm.RemoveMachineInstrFromMaps(MI);
1164 MI->eraseFromParent();
1168 // If def for this use can't be rematerialized, then try folding.
1169 // If def is rematerializable and it's a load, also try folding.
1170 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
// Try to fold loads (from stack slot, constant pool, etc.) into uses.
1178 // Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for two
// reasons:
1182 // 1. If the instr reads the same spilled vreg multiple times, we
1183 // want to reuse the NewVReg.
1184 // 2. If the instr is a two-addr instruction, we are required to
1185 // keep the src/dst regs pinned.
1187 // Keep track of whether we replace a use and/or def so that we can
1188 // create the spill interval with the appropriate range.
1190 HasUse = mop.isUse();
1191 HasDef = mop.isDef();
1192 SmallVector<unsigned, 2> Ops;
1194 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1195 const MachineOperand &MOj = MI->getOperand(j);
1198 unsigned RegJ = MOj.getReg();
1199 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1203 HasUse |= MOj.isUse();
1204 HasDef |= MOj.isDef();
1208 if (HasUse && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons. e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1220 // Update stack slot spill weight if we are splitting.
1221 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1228 // Do not fold load / store here if we are splitting. We'll find an
1229 // optimal point to insert a load / store later.
1231 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1232 Ops, FoldSS, FoldSlot, Reg)) {
1233 // Folding the load/store can completely change the instruction in
1234 // unpredictable ways, rescan it from the beginning.
1238 if (isRemoved(MI)) {
1242 goto RestartInstruction;
1245 // We'll try to fold it later if it's profitable.
1246 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1250 // Create a new virtual register for the spill interval.
1251 bool CreatedNewVReg = false;
1253 NewVReg = mri_->createVirtualRegister(rc);
1255 CreatedNewVReg = true;
1257 mop.setReg(NewVReg);
1258 if (mop.isImplicit())
1259 rewriteImplicitOps(li, MI, NewVReg, vrm);
1261 // Reuse NewVReg for other reads.
1262 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1263 MachineOperand &mopj = MI->getOperand(Ops[j]);
1264 mopj.setReg(NewVReg);
1265 if (mopj.isImplicit())
1266 rewriteImplicitOps(li, MI, NewVReg, vrm);
1269 if (CreatedNewVReg) {
1271 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1272 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1273 // Each valnum may have its own remat id.
1274 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1276 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1278 if (!CanDelete || (HasUse && HasDef)) {
1279 // If this is a two-addr instruction then its use operands are
// rematerializable but its def is not. It should be assigned a
// stack slot.
1282 vrm.assignVirt2StackSlot(NewVReg, Slot);
1285 vrm.assignVirt2StackSlot(NewVReg, Slot);
1287 } else if (HasUse && HasDef &&
1288 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1289 // If this interval hasn't been assigned a stack slot (because earlier
1290 // def is a deleted remat def), do it now.
1291 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1292 vrm.assignVirt2StackSlot(NewVReg, Slot);
1295 // Re-matting an instruction with virtual register use. Add the
1296 // register as an implicit use on the use MI.
1297 if (DefIsReMat && ImpUse)
1298 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1300 // create a new register interval for this spill / remat.
1301 LiveInterval &nI = getOrCreateInterval(NewVReg);
1302 if (CreatedNewVReg) {
1303 NewLIs.push_back(&nI);
1304 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1306 vrm.setIsSplitFromReg(NewVReg, li.reg);
1310 if (CreatedNewVReg) {
1311 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1312 nI.getNextValue(~0U, 0, VNInfoAllocator));
1316 // Extend the split live interval to this def / use.
1317 unsigned End = getUseIndex(index)+1;
1318 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1319 nI.getValNumInfo(nI.getNumValNums()-1));
1325 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1326 nI.getNextValue(~0U, 0, VNInfoAllocator));
1331 DOUT << "\t\t\t\tAdded new interval: ";
1332 nI.print(DOUT, tri_);
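/// anyKillInMBBAfterIdx - Returns true if the given val# has a kill inside
/// MBB at an index strictly greater than Idx.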
1337 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1339 MachineBasicBlock *MBB, unsigned Idx) const {
1340 unsigned End = getMBBEndIdx(MBB);
1341 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1342 unsigned KillIdx = VNI->kills[j];
1343 if (KillIdx > Idx && KillIdx < End)
1349 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1350 /// during spilling.
1352 struct RewriteInfo {
1357 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1358 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1361 struct RewriteInfoCompare {
1362 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1363 return LHS.Index < RHS.Index;
1368 void LiveIntervals::
1369 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1370 LiveInterval::Ranges::const_iterator &I,
1371 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1372 unsigned Slot, int LdSlot,
1373 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1375 const TargetRegisterClass* rc,
1376 SmallVector<int, 4> &ReMatIds,
1377 const MachineLoopInfo *loopInfo,
1378 BitVector &SpillMBBs,
1379 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1380 BitVector &RestoreMBBs,
1381 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1382 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1383 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1384 bool AllCanFold = true;
1385 unsigned NewVReg = 0;
1386 unsigned start = getBaseIndex(I->start);
1387 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
// First collect all the defs / uses in this live range that will be rewritten.
1390 // Make sure they are sorted according to instruction index.
1391 std::vector<RewriteInfo> RewriteMIs;
1392 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1393 re = mri_->reg_end(); ri != re; ) {
1394 MachineInstr *MI = &*ri;
1395 MachineOperand &O = ri.getOperand();
1397 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1398 unsigned index = getInstructionIndex(MI);
1399 if (index < start || index >= end)
1401 if (O.isUse() && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons. e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1412 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1414 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1416 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1417 // Now rewrite the defs and uses.
1418 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1419 RewriteInfo &rwi = RewriteMIs[i];
1421 unsigned index = rwi.Index;
1422 bool MIHasUse = rwi.HasUse;
1423 bool MIHasDef = rwi.HasDef;
1424 MachineInstr *MI = rwi.MI;
// If MI defs and/or uses the same register multiple times, then there
// are multiple entries.
1427 unsigned NumUses = MIHasUse;
1428 while (i != e && RewriteMIs[i].MI == MI) {
1429 assert(RewriteMIs[i].Index == index);
1430 bool isUse = RewriteMIs[i].HasUse;
1431 if (isUse) ++NumUses;
1433 MIHasDef |= RewriteMIs[i].HasDef;
1436 MachineBasicBlock *MBB = MI->getParent();
1438 if (ImpUse && MI != ReMatDefMI) {
1439 // Re-matting an instruction with virtual register use. Update the
// register interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
1442 LiveInterval &ImpLi = getInterval(ImpUse);
1443 ImpLi.weight = HUGE_VALF;
1446 unsigned MBBId = MBB->getNumber();
1447 unsigned ThisVReg = 0;
1449 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1450 if (NVI != MBBVRegsMap.end()) {
1451 ThisVReg = NVI->second;
// It's better to start a new interval to avoid artificially
// extending the new interval.
1460 if (MIHasDef && !MIHasUse) {
1461 MBBVRegsMap.erase(MBB->getNumber());
1467 bool IsNew = ThisVReg == 0;
1469 // This ends the previous live interval. If all of its def / use
1470 // can be folded, give it a low spill weight.
1471 if (NewVReg && TrySplit && AllCanFold) {
1472 LiveInterval &nI = getOrCreateInterval(NewVReg);
1479 bool HasDef = false;
1480 bool HasUse = false;
1481 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1482 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1483 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1484 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1485 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1486 if (!HasDef && !HasUse)
1489 AllCanFold &= CanFold;
1491 // Update weight of spill interval.
1492 LiveInterval &nI = getOrCreateInterval(NewVReg);
1494 // The spill weight is now infinity as it cannot be spilled again.
1495 nI.weight = HUGE_VALF;
1499 // Keep track of the last def and first use in each MBB.
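// Roughly: SpillIdxes records, per MBB, the last def of NewVReg (a candidate
// point to insert a store to the stack slot) and RestoreIdxes records the
// first use (a candidate point to insert a reload); the canFold flag
// remembers whether that store / reload could instead be folded into the
// instruction. SpillMBBs / RestoreMBBs mark which blocks have entries.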
1501 if (MI != ReMatOrigDefMI || !CanDelete) {
1502 bool HasKill = false;
1504 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
// If this is two-address code, then this index starts a new VNInfo.
1507 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1509 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1511 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1512 SpillIdxes.find(MBBId);
1514 if (SII == SpillIdxes.end()) {
1515 std::vector<SRInfo> S;
1516 S.push_back(SRInfo(index, NewVReg, true));
1517 SpillIdxes.insert(std::make_pair(MBBId, S));
1518 } else if (SII->second.back().vreg != NewVReg) {
1519 SII->second.push_back(SRInfo(index, NewVReg, true));
1520 } else if ((int)index > SII->second.back().index) {
1521 // If there is an earlier def and this is a two-address
1522 // instruction, then it's not possible to fold the store (which
1523 // would also fold the load).
1524 SRInfo &Info = SII->second.back();
1526 Info.canFold = !HasUse;
1528 SpillMBBs.set(MBBId);
1529 } else if (SII != SpillIdxes.end() &&
1530 SII->second.back().vreg == NewVReg &&
1531 (int)index > SII->second.back().index) {
1532 // There is an earlier def that's not killed (must be two-address).
1533 // The spill is no longer needed.
1534 SII->second.pop_back();
1535 if (SII->second.empty()) {
1536 SpillIdxes.erase(MBBId);
1537 SpillMBBs.reset(MBBId);
1544 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1545 SpillIdxes.find(MBBId);
1546 if (SII != SpillIdxes.end() &&
1547 SII->second.back().vreg == NewVReg &&
1548 (int)index > SII->second.back().index)
// Use(s) follow the last def; it's not safe to fold the spill.
1550 SII->second.back().canFold = false;
1551 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1552 RestoreIdxes.find(MBBId);
1553 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1554 // If we are splitting live intervals, only fold if it's the first
1555 // use and there isn't another use later in the MBB.
1556 RII->second.back().canFold = false;
1558 // Only need a reload if there isn't an earlier def / use.
1559 if (RII == RestoreIdxes.end()) {
1560 std::vector<SRInfo> Infos;
1561 Infos.push_back(SRInfo(index, NewVReg, true));
1562 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1564 RII->second.push_back(SRInfo(index, NewVReg, true));
1566 RestoreMBBs.set(MBBId);
1570 // Update spill weight.
1571 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1572 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1575 if (NewVReg && TrySplit && AllCanFold) {
1576 // If all of its def / use can be folded, give it a low spill weight.
1577 LiveInterval &nI = getOrCreateInterval(NewVReg);
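/// alsoFoldARestore - Returns true if a foldable restore of register vr has
/// been recorded at the given index in block Id.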
1582 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1583 BitVector &RestoreMBBs,
1584 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1585 if (!RestoreMBBs[Id])
1587 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1588 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1589 if (Restores[i].index == index &&
1590 Restores[i].vreg == vr &&
1591 Restores[i].canFold)
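/// eraseRestoreInfo - Invalidate any restore recorded for register vr at the
/// given index in block Id so that no reload is inserted there.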
1596 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1597 BitVector &RestoreMBBs,
1598 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1599 if (!RestoreMBBs[Id])
1601 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1602 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
if (Restores[i].index == index && Restores[i].vreg == vr)
1604 Restores[i].index = -1;
1607 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1608 /// spilled and create empty intervals for their uses.
1610 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1611 const TargetRegisterClass* rc,
1612 std::vector<LiveInterval*> &NewLIs) {
1613 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1614 re = mri_->reg_end(); ri != re; ) {
1615 MachineOperand &O = ri.getOperand();
1616 MachineInstr *MI = &*ri;
1619 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1620 "Register def was not rewritten?");
1621 RemoveMachineInstrFromMaps(MI);
1622 vrm.RemoveMachineInstrFromMaps(MI);
1623 MI->eraseFromParent();
// This must be a use of an implicit_def so it's not part of the live
1626 // interval. Create a new empty live interval for it.
1627 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1628 unsigned NewVReg = mri_->createVirtualRegister(rc);
1630 vrm.setIsImplicitlyDefined(NewVReg);
1631 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1632 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1633 MachineOperand &MO = MI->getOperand(i);
1634 if (MO.isReg() && MO.getReg() == li.reg)
1643 bool operator()(LiveInterval* A, LiveInterval* B) {
1644 return A->beginNumber() < B->beginNumber();
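/// addIntervalsForSpillsFast - A simple per-instruction spiller used when
/// -fast-spill is enabled: assign the interval a single stack slot, and for
/// each instruction touching the register either fold the memory access into
/// the instruction or rewrite it to a fresh vreg that is reloaded before the
/// instruction if it reads the register and stored afterwards if it writes it.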
1649 std::vector<LiveInterval*> LiveIntervals::
1650 addIntervalsForSpillsFast(const LiveInterval &li,
1651 const MachineLoopInfo *loopInfo,
1652 VirtRegMap &vrm, float& SSWeight) {
1653 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1655 std::vector<LiveInterval*> added;
1657 assert(li.weight != HUGE_VALF &&
1658 "attempt to spill already spilled interval!");
1660 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1664 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1668 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1669 while (RI != mri_->reg_end()) {
1670 MachineInstr* MI = &*RI;
1672 SmallVector<unsigned, 2> Indices;
1673 bool HasUse = false;
1674 bool HasDef = false;
1676 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1677 MachineOperand& mop = MI->getOperand(i);
1678 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1680 HasUse |= MI->getOperand(i).isUse();
1681 HasDef |= MI->getOperand(i).isDef();
1683 Indices.push_back(i);
1686 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1687 Indices, true, slot, li.reg)) {
1688 unsigned NewVReg = mri_->createVirtualRegister(rc);
1690 vrm.assignVirt2StackSlot(NewVReg, slot);
1692 // create a new register for this spill
1693 LiveInterval &nI = getOrCreateInterval(NewVReg);
1695 // the spill weight is now infinity as it
1696 // cannot be spilled again
1697 nI.weight = HUGE_VALF;
1699 // Rewrite register operands to use the new vreg.
1700 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1701 E = Indices.end(); I != E; ++I) {
1702 MI->getOperand(*I).setReg(NewVReg);
1704 if (MI->getOperand(*I).isUse())
1705 MI->getOperand(*I).setIsKill(true);
1708 // Fill in the new live interval.
1709 unsigned index = getInstructionIndex(MI);
1711 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1712 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1715 vrm.addRestorePoint(NewVReg, MI);
1718 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1719 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1722 vrm.addSpillPoint(NewVReg, true, MI);
1725 added.push_back(&nI);
1727 DOUT << "\t\t\t\tadded new interval: ";
1731 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1734 SSWeight += getSpillWeight(true, true, loopDepth);
1736 SSWeight += getSpillWeight(false, true, loopDepth);
1738 SSWeight += getSpillWeight(true, false, loopDepth);
1742 RI = mri_->reg_begin(li.reg);
1745 // Clients expect the new intervals to be returned in sorted order.
1746 std::sort(added.begin(), added.end(), LISorter());
1751 std::vector<LiveInterval*> LiveIntervals::
1752 addIntervalsForSpills(const LiveInterval &li,
1753 SmallVectorImpl<LiveInterval*> &SpillIs,
1754 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1757 if (EnableFastSpilling)
1758 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1760 assert(li.weight != HUGE_VALF &&
1761 "attempt to spill already spilled interval!");
1763 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1764 li.print(DOUT, tri_);
1767 // Spill slot weight.
// Each bit specifies whether a spill is required in the corresponding MBB.
1771 BitVector SpillMBBs(mf_->getNumBlockIDs());
1772 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1773 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1774 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1775 DenseMap<unsigned,unsigned> MBBVRegsMap;
1776 std::vector<LiveInterval*> NewLIs;
1777 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1779 unsigned NumValNums = li.getNumValNums();
1780 SmallVector<MachineInstr*, 4> ReMatDefs;
1781 ReMatDefs.resize(NumValNums, NULL);
1782 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1783 ReMatOrigDefs.resize(NumValNums, NULL);
1784 SmallVector<int, 4> ReMatIds;
1785 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1786 BitVector ReMatDelete(NumValNums);
1787 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
// Spilling a split live interval. It cannot be split any further. It is
// also guaranteed to be a single val# / range interval.
1791 if (vrm.getPreSplitReg(li.reg)) {
1792 vrm.setIsSplitFromReg(li.reg, 0);
1793 // Unset the split kill marker on the last use.
1794 unsigned KillIdx = vrm.getKillPoint(li.reg);
1796 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1797 assert(KillMI && "Last use disappeared?");
1798 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1799 assert(KillOp != -1 && "Last use disappeared?");
1800 KillMI->getOperand(KillOp).setIsKill(false);
1802 vrm.removeKillPoint(li.reg);
1803 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1804 Slot = vrm.getStackSlot(li.reg);
1805 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1806 MachineInstr *ReMatDefMI = DefIsReMat ?
1807 vrm.getReMaterializedMI(li.reg) : NULL;
1809 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1810 bool isLoad = isLoadSS ||
1811 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1812 bool IsFirstRange = true;
1813 for (LiveInterval::Ranges::const_iterator
1814 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1815 // If this is a split live interval with multiple ranges, it means there
1816 // are two-address instructions that re-defined the value. Only the
1817 // first def can be rematerialized!
1818 if (IsFirstRange) {
1819 // Note ReMatOrigDefMI has already been deleted.
1820 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1821 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1822 false, vrm, rc, ReMatIds, loopInfo,
1823 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1824 MBBVRegsMap, NewLIs, SSWeight);
1825 } else {
1826 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1827 Slot, 0, false, false, false,
1828 false, vrm, rc, ReMatIds, loopInfo,
1829 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1830 MBBVRegsMap, NewLIs, SSWeight);
1831 }
1832 IsFirstRange = false;
1833 }
1835 SSWeight = 0.0f; // Already accounted for when split.
1836 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1837 return NewLIs;
1838 }
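1839 // Decide whether to try splitting this interval at basic block boundaries.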
1840 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1841 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1842 TrySplit = false;
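1843 // First pass over the val#s: record which defs are rematerializable and
1844 // whether a stack slot is needed at all.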
1845 bool NeedStackSlot = false;
1846 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1847 i != e; ++i) {
1848 const VNInfo *VNI = *i;
1849 unsigned VN = VNI->id;
1850 unsigned DefIdx = VNI->def;
1851 if (DefIdx == ~1U)
1852 continue; // Dead val#.
1853 // Is the def for the val# rematerializable?
1854 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1855 ? 0 : getInstructionFromIndex(DefIdx);
1856 bool dummy;
1857 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1858 // Remember how to remat the def of this val#.
1859 ReMatOrigDefs[VN] = ReMatDefMI;
1860 // Original def may be modified so we have to make a copy here.
1861 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1862 ClonedMIs.push_back(Clone);
1863 ReMatDefs[VN] = Clone;
1865 bool CanDelete = true;
1866 if (VNI->hasPHIKill) {
1867 // One of the kills is a phi node, so not all of its uses can be
1868 // rematerialized. It must not be deleted.
1869 CanDelete = false;
1870 // Need a stack slot if there is any live range where uses cannot be
1871 // rematerialized.
1872 NeedStackSlot = true;
1873 }
1874 if (CanDelete)
1875 ReMatDelete.set(VN);
1876 } else {
1877 // Need a stack slot if there is any live range where uses cannot be
1878 // rematerialized.
1879 NeedStackSlot = true;
1880 }
1881 }
1883 // One stack slot per live interval.
1884 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1885 Slot = vrm.assignVirt2StackSlot(li.reg);
1887 // Create new intervals and rewrite defs and uses.
1888 for (LiveInterval::Ranges::const_iterator
1889 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1890 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1891 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1892 bool DefIsReMat = ReMatDefMI != NULL;
1893 bool CanDelete = ReMatDelete[I->valno->id];
1894 int LdSlot = 0;
1895 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1896 bool isLoad = isLoadSS ||
1897 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1898 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1899 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1900 CanDelete, vrm, rc, ReMatIds, loopInfo,
1901 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1902 MBBVRegsMap, NewLIs, SSWeight);
1903 }
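1904 // The per-MBB spill / restore points recorded above are inserted (and folded where possible) below.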
1905 // Insert spills / restores if we are splitting.
1906 if (!TrySplit) {
1907 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1908 return NewLIs;
1909 }
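1910 // AddedKill tracks intervals whose last use is already a kill; Ops holds operand indices considered for folding.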
1911 SmallPtrSet<LiveInterval*, 4> AddedKill;
1912 SmallVector<unsigned, 2> Ops;
1913 if (NeedStackSlot) {
1914 int Id = SpillMBBs.find_first();
1915 while (Id != -1) {
1916 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1917 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1918 std::vector<SRInfo> &spills = SpillIdxes[Id];
1919 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1920 int index = spills[i].index;
1921 unsigned VReg = spills[i].vreg;
1922 LiveInterval &nI = getOrCreateInterval(VReg);
1923 bool isReMat = vrm.isReMaterialized(VReg);
1924 MachineInstr *MI = getInstructionFromIndex(index);
1925 bool CanFold = false;
1926 bool FoundUse = false;
1928 if (spills[i].canFold) {
1929 CanFold = true;
1930 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1931 MachineOperand &MO = MI->getOperand(j);
1932 if (!MO.isReg() || MO.getReg() != VReg)
1933 continue;
1939 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1940 RestoreMBBs, RestoreIdxes))) {
1941 // MI has two-address uses of the same register. If the use
1942 // isn't the first and only use in the BB, then we can't fold
1943 // it. FIXME: Move this to rewriteInstructionsForSpills.
1950 // Fold the store into the def if possible.
1951 bool Folded = false;
1952 if (CanFold && !Ops.empty()) {
1953 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1954 Folded = true;
1956 // Also folded uses, do not issue a load.
1957 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1958 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1960 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1964 // Otherwise tell the spiller to issue a spill.
1966 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1967 bool isKill = LR->end == getStoreIndex(index);
1968 if (!MI->registerDefIsDead(nI.reg))
1969 // No need to spill a dead def.
1970 vrm.addSpillPoint(VReg, isKill, MI);
1971 if (isKill)
1972 AddedKill.insert(&nI);
1975 // Update spill slot weight.
1977 SSWeight += getSpillWeight(true, false, loopDepth);
1978 }
1979 Id = SpillMBBs.find_next(Id);
1980 }
1981 }
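1982 // Now insert the restores (reloads / rematerializations) recorded per MBB.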
1983 int Id = RestoreMBBs.find_first();
1984 while (Id != -1) {
1985 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1986 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1988 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1989 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1990 int index = restores[i].index;
1993 unsigned VReg = restores[i].vreg;
1994 LiveInterval &nI = getOrCreateInterval(VReg);
1995 bool isReMat = vrm.isReMaterialized(VReg);
1996 MachineInstr *MI = getInstructionFromIndex(index);
1997 bool CanFold = false;
1999 if (restores[i].canFold) {
2000 CanFold = true;
2001 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2002 MachineOperand &MO = MI->getOperand(j);
2003 if (!MO.isReg() || MO.getReg() != VReg)
2004 continue;
2007 // If this restore were to be folded, it would have been folded
2008 // already.
2016 // Fold the load into the use if possible.
2017 bool Folded = false;
2018 if (CanFold && !Ops.empty()) {
2020 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2022 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2023 int LdSlot = 0;
2024 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2025 // If the rematerializable def is a load, also try to fold it.
2026 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2027 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2028 Ops, isLoadSS, LdSlot, VReg);
2029 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2031 // Re-matting an instruction with virtual register use. Add the
2032 // register as an implicit use on the use MI and update the register
2033 // interval's spill weight to HUGE_VALF to prevent it from being
2034 // spilled.
2035 LiveInterval &ImpLi = getInterval(ImpUse);
2036 ImpLi.weight = HUGE_VALF;
2037 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
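2038 // The operand added above is an implicit use (isDef = false, isImp = true).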
2041 // If folding is not possible / failed, then tell the spiller to issue a
2042 // load / rematerialization for us.
2043 if (Folded)
2044 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2045 else
2046 vrm.addRestorePoint(VReg, MI);
2048 // Update spill slot weight.
2050 SSWeight += getSpillWeight(false, true, loopDepth);
2051 }
2052 Id = RestoreMBBs.find_next(Id);
2053 }
2055 // Finalize intervals: add kills, finalize spill weights, and filter out
2056 // dead intervals.
2057 std::vector<LiveInterval*> RetNewLIs;
2058 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2059 LiveInterval *LI = NewLIs[i];
2060 if (!LI->empty()) {
2061 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2062 if (!AddedKill.count(LI)) {
2063 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2064 unsigned LastUseIdx = getBaseIndex(LR->end);
2065 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2066 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2067 assert(UseIdx != -1);
2068 if (LastUse->getOperand(UseIdx).isImplicit() ||
2069 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2070 LastUse->getOperand(UseIdx).setIsKill();
2071 vrm.addKillPoint(LI->reg, LastUseIdx);
2072 }
2073 }
2074 RetNewLIs.push_back(LI);
2075 }
2076 }
2078 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2079 return RetNewLIs;
2080 }
2082 /// hasAllocatableSuperReg - Return true if the specified physical register has
2083 /// any super register that's allocatable.
2084 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2085 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2086 if (allocatableRegs_[*AS] && hasInterval(*AS))
2087 return true;
2088 return false;
2089 }
2091 /// getRepresentativeReg - Find the largest super register of the specified
2092 /// physical register.
2093 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2094 // Find the largest super-register that is allocatable.
2095 unsigned BestReg = Reg;
2096 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2097 unsigned SuperReg = *AS;
2098 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2099 BestReg = SuperReg;
2100 break;
2101 }
2102 }
2103 return BestReg;
2104 }
2106 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2107 /// specified interval that conflict with the specified physical register.
2108 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2109 unsigned PhysReg) const {
2110 unsigned NumConflicts = 0;
2111 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2112 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2113 E = mri_->reg_end(); I != E; ++I) {
2114 MachineOperand &O = I.getOperand();
2115 MachineInstr *MI = O.getParent();
2116 unsigned Index = getInstructionIndex(MI);
2117 if (pli.liveAt(Index))
2118 ++NumConflicts;
2119 }
2120 return NumConflicts;
2123 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2124 /// around all defs and uses of the specified interval.
2125 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2126 unsigned PhysReg, VirtRegMap &vrm) {
2127 unsigned SpillReg = getRepresentativeReg(PhysReg);
2129 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2130 // If there are registers that alias PhysReg but are not sub-registers of
2131 // the chosen representative super-register, assert, since we can't
2132 // handle that case yet.
2133 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2134 tri_->isSuperRegister(*AS, SpillReg));
2136 LiveInterval &pli = getInterval(SpillReg);
2137 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2138 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2139 E = mri_->reg_end(); I != E; ++I) {
2140 MachineOperand &O = I.getOperand();
2141 MachineInstr *MI = O.getParent();
2142 if (SeenMIs.count(MI))
2143 continue;
2144 SeenMIs.insert(MI);
2145 unsigned Index = getInstructionIndex(MI);
2146 if (pli.liveAt(Index)) {
2147 vrm.addEmergencySpill(SpillReg, MI);
2148 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2149 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2150 if (!hasInterval(*AS))
2151 continue;
2152 LiveInterval &spli = getInterval(*AS);
2153 if (spli.liveAt(Index))
2154 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2155 }
2156 }
2157 }
2158 }
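2159 /// addLiveRangeToEndOfBlock - Extend reg's live interval from the def at startInst to the end of its basic block.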
2160 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2161 MachineInstr* startInst) {
2162 LiveInterval& Interval = getOrCreateInterval(reg);
2163 VNInfo* VN = Interval.getNextValue(
2164 getInstructionIndex(startInst) + InstrSlots::DEF,
2165 startInst, getVNInfoAllocator());
2166 VN->hasPHIKill = true;
2167 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2168 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2169 getMBBEndIdx(startInst->getParent()) + 1, VN);
2170 Interval.addRange(LR);