//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
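
// A note on the indexing used throughout this file: instruction indices are
// scaled by InstrSlots::NUM (four slots per instruction; see the InstrSlots
// enum in LiveIntervalAnalysis.h). For an instruction at base index i the
// slots are, in order, i+LOAD, i+USE, i+DEF and i+STORE; e.g. a value
// defined by the instruction at base index 8 starts at index 10 (its DEF
// slot).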
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds, "Number of loads/stores folded into instructions");
STATISTIC(numSplits, "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
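
// Clients obtain this analysis through the usual pass-manager idiom, e.g. a
// register allocator would declare in its getAnalysisUsage:
//   AU.addRequired<LiveIntervals>();
// and then call getAnalysis<LiveIntervals>() in its runOnMachineFunction.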
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
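
  // With the empty slots inserted above, a block laid out at StartIdx 0 with
  // two instructions gets indices roughly like this (NUM == 4):
  //   0   empty slot at block start
  //   4   first instruction
  //   8   empty slot
  //   12  second instruction
  //   16  empty slot
  // and the next block starts at 20. The empty slots leave room to number
  // instructions inserted later (e.g. spill code) without renumbering.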
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
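        // For example, an old start index of 40 gives index 10 and offset 0
        // (the LOAD slot): the range began at a block boundary, so below it
        // is remapped to the start of its (possibly renumbered) block rather
        // than to a particular instruction.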
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                      (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
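    //
    // For example, a vreg defined at base index 8 and killed by the
    // instruction at base index 16 gets the single range [10, 18):
    // defSlot(8) == 10 and useSlot(16)+1 == 18.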
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
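    //
    // A typical two-address sequence (pseudo machine code) looks like:
    //   %reg1025 = MOV %reg1024
    //   %reg1025 = ADD %reg1025, %reg1026
    // where the second def of %reg1025 introduces a second value in what
    // was built as a single-valued interval.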
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
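  // For example, a dead def at base index 8 yields the unit range [10, 11).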
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // modifying instruction where we know it's dead is if it is live-in to
  // the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand &MO, unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
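/// For example, with four slots per instruction, a register defined by the
/// first instruction of the function (base index 4) and last used by the
/// second (base index 12) might get the interval [6, 14): defSlot(4) == 6
/// and useSlot(12)+1 == 14.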
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;
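
  // (Immutable fixed stack slots typically hold incoming arguments passed on
  // the stack, so reloading from one is safe to repeat at any point.)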
  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one register use other than the register being
        // rematerialized, as that's all the remat mechanisms support
        // currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }

        // For uses, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
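  // MRInfo accumulates VirtRegMap::isRef / isMod flags describing whether the
  // folded operands read and/or write the register: a plain use contributes
  // isRef, a def contributes isMod, and a tied (two-address) use gives
  // isModRef.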
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\t' << (*MI);
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs,
                                SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
      }
      RestoreMBBs.set(MBBId);
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  SSWeight = 0.0f;

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      nI.print(DOUT, tri_);
      DOUT << '\n';

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse) {
        if (HasDef)
          SSWeight += getSpillWeight(true, true, loopDepth);
        else
          SSWeight += getSpillWeight(false, true, loopDepth);
      } else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
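  // The bit vectors and maps above are parallel structures: bit N set in
  // SpillMBBs (resp. RestoreMBBs) means block N needs spill (resp. restore)
  // code, and SpillIdxes[N] (resp. RestoreIdxes[N]) holds the SRInfo
  // records -- (instruction index, vreg, canFold) -- saying where and what
  // to insert.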
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
1790 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1791 Slot = vrm.getStackSlot(li.reg);
1792 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1793 MachineInstr *ReMatDefMI = DefIsReMat ?
1794 vrm.getReMaterializedMI(li.reg) : NULL;
1796 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1797 bool isLoad = isLoadSS ||
1798 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1799 bool IsFirstRange = true;
1800 for (LiveInterval::Ranges::const_iterator
1801 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1802 // If this is a split live interval with multiple ranges, it means there
1803 // are two-address instructions that re-defined the value. Only the
1804 // first def can be rematerialized!
1806 // Note ReMatOrigDefMI has already been deleted.
1807 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1808 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1809 false, vrm, rc, ReMatIds, loopInfo,
1810 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1811 MBBVRegsMap, NewLIs, SSWeight);
1813 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1814 Slot, 0, false, false, false,
1815 false, vrm, rc, ReMatIds, loopInfo,
1816 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1817 MBBVRegsMap, NewLIs, SSWeight);
1819 IsFirstRange = false;
1822 SSWeight = 0.0f; // Already accounted for when split.
1823 handleSpilledImpDefs(li, vrm, rc, NewLIs);
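
  // Normal (non-presplit) path: consider splitting the interval at basic
  // block boundaries, but only if it spans more than one MBB and the
  // -split-limit debugging cap has not been reached.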
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // This val# is killed by a phi node: not all of its uses can be
        // rematerialized, so the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
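
  // At this point ReMatOrigDefs[VN] holds each rematerializable original def,
  // ReMatDefs[VN] a private clone of it, and ReMatDelete marks the val#s
  // whose defs may be deleted once all of their uses are rematerialized.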

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (MO.isImplicit() ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }

        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
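
  // Insert restores. This mirrors the spill loop above: for each basic block
  // that needs a reload, either fold the load (or the rematerialized def)
  // into the using instruction, or ask the spiller to emit an explicit
  // restore.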
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,
                                                    TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
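
// Illustrative (hypothetical) call site for addIntervalsForSpills; the names
// lis, CurLI, SpillIs, loopInfo and vrm below are placeholders, not code from
// this file:
//
//   float SSWeight;
//   SmallVector<LiveInterval*, 8> SpillIs;  // other intervals being spilled
//   std::vector<LiveInterval*> NewLIs =
//       lis->addIntervalsForSpills(*CurLI, SpillIs, loopInfo, *vrm, SSWeight);
//   // NewLIs comes back sorted; the register allocator re-queues them.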

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                           unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Assert if there is a register that aliases PhysReg but is not a
    // sub-register of the chosen representative super register; we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
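
/// addLiveRangeToEndOfBlock - Extend the live interval of the given register
/// with a new value defined by startInst and live out to the end of
/// startInst's basic block. (Summary of the body below; the value is marked
/// as having a phi kill at the block end index.)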
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}