//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
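
  // Illustrative sketch of the numbering scheme (not part of the original
  // source), assuming the InstrSlots layout declared in
  // LiveIntervalAnalysis.h (LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4).
  // A block containing two single-def instructions is numbered:
  //
  //   0    (empty slot at block start)
  //   4    %reg1024 = ADD ...   load = 4, use = 5, def = 6, store = 7
  //   8    (empty slot for the def)
  //   12   %reg1025 = SUB ...   load = 12, use = 13, def = 14, store = 15
  //   16   (empty slot for the def)
  //
  // getBaseIndex rounds an index down to a multiple of InstrSlots::NUM;
  // getLoadIndex/getUseIndex/getDefIndex/getStoreIndex add the slot offset.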
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
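
// Worked example of the remapping above (a sketch, not original code): if
// the instruction at old base index 20 has been erased, an end or kill
// index pointing into it scans forward through OldI2MI until it reaches the
// next surviving instruction, say at old index 24, and is rebased onto that
// instruction's new index; an index that falls on a LOAD slot is instead
// snapped to the boundary of the basic block that contained it.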
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
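
    // Illustrative example (assumes the slot layout sketched in
    // computeNumbering, not original code): for
    //   16  %reg1024<def> = ADD ...
    //   24  ...           = USE %reg1024<kill>
    // defIndex = getDefIndex(16) = 18 and killIdx = getUseIndex(24)+1 = 26,
    // so the interval for %reg1024 gets the single range [18,26).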
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
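
      // Illustrative example of the renumbering above (not original code):
      //   4   %reg1024<def> = MOV %reg1026      <- two-addr copy
      //   12  %reg1024<def> = ADD %reg1024, 4   <- two-addr redef
      // DefIndex = getDefIndex(4) = 6 and RedefIndex = getDefIndex(12) = 14.
      // The copy's range [6,14) is re-labelled with the new value number,
      // and value #0's def is moved to the redef slot (14), leaving two
      // values in the same interval.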
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
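
// Example sketch (under the same slot assumptions as above, X86-flavored
// for illustration only): a physreg def that is never read before the next
// def, e.g.
//   8   EAX<def> = ...
//   12  EAX<def> = ...
// yields the unit range [10,11) for the first def, i.e.
// [defSlot(def), defSlot(def)+1).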
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for an exact
      // match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
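
// For example (on X86, as an illustration only): a def of EAX also defines
// its sub-registers AX, AL and AH, so an interval is created for each of
// them here unless the instruction already mentions the sub-register as an
// explicit def of its own.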
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (isAlias) {
    DOUT << " dead";
    end = getDefIndex(MIIdx) + 1;
  } else {
    DOUT << " live through";
    end = baseIndex;
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
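
// Illustrative example (not original code): for a block whose live-in
// physreg is first killed by the instruction at base index 12,
// start = the block's start index and end = getUseIndex(12)+1 = 14. The
// value number gets the sentinel def ~0U, since no instruction in the
// function defines it.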
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle register defs - build intervals.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                                   SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                                      SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                 HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
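
// Sketch of the check above: if MI sits in a range belonging to val#1 but
// the range containing UseIdx belongs to val#2 (e.g. the register was
// redefined between the two points), the val# is not available at UseIdx
// and rematerializing there would reproduce the wrong value.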
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;
  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;
    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }

        // For uses, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
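
// Illustration only (X86-flavored, not from the original source): folding a
// reload from stack slot <fi#2> into its use turns
//   %reg1024 = MOV32rm <fi#2>
//   %reg1025 = ADD32rr %reg1025, %reg1024
// into the single instruction
//   %reg1025 = ADD32rm %reg1025, <fi#2>
// after which the i2miMap_/mi2iMap_ entries of the old instruction are
// transferred to the folded one, as done above.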
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Update stack slot spill weight if we are not splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
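
    // A minimal sketch of the weight computation, assuming getSpillWeight
    // has the usual definition from LiveIntervalAnalysis.h of this era:
    //
    //   static float getSpillWeight(bool isDef, bool isUse,
    //                               unsigned loopDepth) {
    //     return (isDef + isUse) * powf(10.0F, (float)loopDepth);
    //   }
    //
    // so a def+use of the register at loop depth 2 would contribute
    // (1 + 1) * 10^2 = 200 to the stack slot weight.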

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);

          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // If this def is not followed by a use in the same MBB, it's better
        // to start a new interval to avoid artificially extending the new
        // interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      nI.print(DOUT, tri_);
      DOUT << '\n';
    }

    unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
    if (HasUse && HasDef)
      SSWeight += getSpillWeight(true, true, loopDepth);
    else if (HasUse)
      SSWeight += getSpillWeight(false, true, loopDepth);
    else
      SSWeight += getSpillWeight(true, false, loopDepth);

    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
1872 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1873 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
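  // First pass over the val#s: determine which defs can be rematerialized
  // and whether a stack slot is still needed for uses that cannot be.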
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
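  // Now insert the spills and restores recorded above. AddedKill tracks new
  // intervals whose last use is already marked as a kill by a spill; Ops
  // collects the operand indices of the current instruction that refer to
  // the register being processed.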
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
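  // Process each recorded restore point: fold the reload (or rematerialized
  // load) into the using instruction where possible, otherwise ask the
  // spiller to insert a load / rematerialization.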
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle that situation yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
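/// addLiveRangeToEndOfBlock - Extend the live interval of the given register
/// with a range from startInst's def slot to the end of its basic block,
/// marking the value as having a PHI kill at the block boundary.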
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}