1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.

// Completely disable rematerialization of values (see isReMaterializable).
static cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
// Allow live intervals to be split at basic-block boundaries (on by default).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
cl::init(true), cl::Hidden);
// Cap on the number of interval splits performed (initialized to -1).
static cl::opt<int> SplitLimit("split-limit",
cl::init(-1), cl::Hidden);
// Enable the extra target-independent rematerialization legality checks
// (consulted in isReMaterializable below).
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// Select the fast spilling code path (consumed elsewhere in this pass).
static cl::opt<bool> EnableFastSpilling("fast-spill",
cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");

// Pass identification and static registration under -liveintervals.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and preserves,
/// then delegate to the base class for the machine-function defaults.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  // LiveVariables is both consumed (lv_) and kept up to date.
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination and two-address lowering must run before this pass.
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the intervals themselves,
/// the VNInfo allocator's memory, and any machine instructions this pass
/// cloned (ClonedMIs).
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  // Delete every instruction recorded in ClonedMIs via the owning function.
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign a slot index to every MachineInstr and basic
/// block, then remap existing live ranges and value numbers from the old
/// numbering (snapshotted in OldI2MI / OldI2MBB) onto the new one.
void LiveIntervals::computeNumbering() {
  // Snapshot the old index maps so ranges can be translated afterwards.
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
      // Each instruction gets a unique index (InstrSlots::NUM apart).
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      MIIndex += InstrSlots::NUM * Slots;
        i2miMap_.push_back(0);

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  // Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // LOAD offset means the range starts at a block boundary.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
          LI->start = getMBBStartIdx(J->second);
          LI->start = mi2iMap_[OldI2MI[index]] + offset;

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          LI->end = getMBBEndIdx(I->second) + 1;
          // Skip over instructions erased since the old numbering.
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          // Ran off the end: clamp to one past the last slot.
          LI->end = InstrSlots::NUM * i2miMap_.size();

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
            vni->def = getMBBStartIdx(J->second);
            vni->def = mi2iMap_[OldI2MI[index]] + offset;

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            vni->kills[i] = getMBBEndIdx(I->second);
            // Skip over erased instructions, as for range ends above.
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
/// runOnMachineFunction - Pass entry point: cache pointers to the target,
/// register info, and required analyses, then compute live intervals for fn.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Record how many intervals were built for the -stats report.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.  Writes every computed interval,
/// then each machine instruction prefixed with its slot index.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    // Label each block with its IR basic block's name.
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each range of li.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Copies involving li.reg itself are not conflicts.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
        // Map virtual operands through the VirtRegMap to their phys regs.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
          PhysReg = vrm.getPhys(PhysReg);
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// printRegName - Print the target's symbolic name for a physical register,
/// or "%reg<N>" for a virtual register, to cerr.
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
    cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Build or extend the live interval for a
/// virtual-register def.  The first def of a vreg creates the interval;
/// later defs are the result of two-address rewriting or PHI elimination
/// and split / extend the existing value numbers accordingly.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);

    // Remember the defining instruction when it is some form of copy, so
    // the value number can later be traced back to its source register.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
      interval.addRange(LR);

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      interval.print(DOUT, tri_);

      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      // Track the copy source for this value, as in the first-def case.
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      // The value lives from this def to the end of the block, where the
      // PHI consumes it.
      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Build the live range for a physical-register
/// def.  The range starts at the def slot and ends at the kill or redef
/// found by scanning forward within the defining block.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    // Skip index slots with no instruction (deleted or padding slots).
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
    baseIndex += InstrSlots::NUM;

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  assert(!CopyMI && "physreg was not killed in defining block!");

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler, and also process the sub-registers of an
/// allocatable physical def.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Record the copy source, if any, so the value number can be traced.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create the live range for a register that is
/// live into MBB, scanning forward from the block start for a kill or a
/// redefinition that ends the range.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  // Advance past any index slots that hold no instruction.
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;

  // Live-in register might not be used at all.
  end = getDefIndex(MIIdx) + 1;
  DOUT << " live through";

  // The value number for a live-in has no defining instruction (~0U def).
  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Walk operands in reverse so earlier operand indices stay valid.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Starting from the first block whose start index is not
/// less than Start, walk the sorted Idx2MBBMap and collect blocks into MBBs.
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                                   SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  // Binary search: Idx2MBBMap is kept sorted by computeNumbering.
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  while (I != Idx2MBBMap.end()) {
    MBBs.push_back(I->second);
/// findReachableMBBs - Like findLiveInMBBs, but additionally walks the
/// successors of each block found within the index range [Start, End].
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                                      SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  // Binary search for the first candidate block at or after Start.
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    // Stop once the block extends past the requested range.
    if (getMBBEndIdx(MBB) > End)
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
/// createInterval - Allocate a new LiveInterval for the given register,
/// choosing its initial spill weight by whether reg is physical or virtual.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
  return new LiveInterval(reg, Weight);
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  // EXTRACT_SUBREG reads its source from operand 1,
  // INSERT_SUBREG from operand 2.
  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  // Otherwise let the target decode the move's source operand.
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
  assert(0 && "Unrecognized copy instruction!");
806 //===----------------------------------------------------------------------===//
807 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  // Scan MI's operands for a register use other than li.reg itself.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  // NOTE(review): assumes li has a live range containing MI's index — the
  // result of FindLiveRangeContaining is dereferenced unchecked here.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Performs target
/// checks first; with -aggressive-remat it also applies target-independent
/// safety checks on memory operands and register operands.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // IMPLICIT_DEF is trivially rematerializable.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)

  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // This is a load from fixed stack slot. It can be rematerialized.

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
      const Value *V = MMO.getValue();
      // Pseudo source values (e.g. constant pool) must report constant;
      // real values are checked against alias analysis.
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
      } else if (!aa_->pointsToConstantMemory(V))

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))

      // Only allow one use other register use, as that's all the
      // remat mechanisms support currently.
      else if (Reg != ImpUse)

      // For uses, there should be only one associate def.
      if (I != E && (next(I) != E || IsLiveIn))

  // If the def has an implicit virtual-register use, that value must reach
  // every use of li that this val# covers.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Convenience
/// overload that supplies empty spill/out arguments to the full check.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // Check each value number's defining instruction in turn.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding; accumulates mod/ref information in MRInfo and the surviving
/// operand indexes in FoldOps.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    MRInfo |= (unsigned)VirtRegMap::isMod;
    // Filter out two-address use operand(s): a tied use means the fold
    // both reads and writes the slot.
    if (!MO.isImplicit() &&
        TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
      MRInfo = VirtRegMap::isModRef;
    MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
1031 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1032 /// slot / to reg or any rematerialized load into ith operand of specified
1033 /// MI. If it is successful, MI is updated with the newly created MI and
/// the instruction-index maps are updated so the folded instruction takes
/// over the original's slot index. Returns true on a successful fold (and
/// also when an IMPLICIT_DEF is simply deleted).
1035 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1036 VirtRegMap &vrm, MachineInstr *DefMI,
1038 SmallVector<unsigned, 2> &Ops,
1039 bool isSS, int Slot, unsigned Reg) {
1040 // If it is an implicit def instruction, just delete it.
1041 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1042 RemoveMachineInstrFromMaps(MI);
1043 vrm.RemoveMachineInstrFromMaps(MI);
1044 MI->eraseFromParent();
1049 // Filter the list of operand indexes that are to be folded. Abort if
1050 // any operand will prevent folding.
1051 unsigned MRInfo = 0;
1052 SmallVector<unsigned, 2> FoldOps;
1053 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1056 // The only time it's safe to fold into a two address instruction is when
1057 // it's folding reload and spill from / into a spill stack slot.
1058 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (spill / restore case) or
// the rematerialized def instruction into MI.
1061 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1062 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1064 // Remember this instruction uses the spill slot.
1065 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1067 // Attempt to fold the memory reference into the instruction. If
1068 // we can do this, we don't need to insert spill code.
1069 MachineBasicBlock &MBB = *MI->getParent();
1070 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1071 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Pending spill / restore / emergency-spill points recorded against the old
// instruction must follow the folded replacement.
1072 vrm.transferSpillPts(MI, fmi);
1073 vrm.transferRestorePts(MI, fmi);
1074 vrm.transferEmergencySpills(MI, fmi);
// The folded instruction inherits the original's instruction index.
1076 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1077 mi2iMap_[fmi] = InstrIdx;
// Replace MI with the folded instruction in the basic block.
1078 MI = MBB.insert(MBB.erase(MI), fmi);
1085 /// canFoldMemoryOperand - Returns true if the specified load / store
1086 /// folding is possible.
///
/// Dry-run counterpart of tryFoldMemoryOperand: filters the candidate
/// operands and then asks the target whether the fold would succeed,
/// without modifying MI.
1087 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1088 SmallVector<unsigned, 2> &Ops,
1090 // Filter the list of operand indexes that are to be folded. Abort if
1091 // any operand will prevent folding.
1092 unsigned MRInfo = 0;
1093 SmallVector<unsigned, 2> FoldOps;
1094 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1097 // It's only legal to remat for a use, not a def.
1098 if (ReMat && (MRInfo & VirtRegMap::isMod))
1101 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if all of li's live ranges fall inside
/// a single basic block, located via binary search over the sorted
/// Idx2MBBMap.
1104 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1105 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1106 for (LiveInterval::Ranges::const_iterator
1107 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the basic block containing this range's start index.
1108 std::vector<IdxMBBPair>::const_iterator II =
1109 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1110 if (II == Idx2MBBMap.end())
// A single range extending past the block boundary already spans MBBs.
1112 if (I->end > II->first) // crossing a MBB.
1114 MBBs.insert(II->second);
1115 if (MBBs.size() > 1)
1121 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1122 /// interval on to-be re-materialized operands of MI) with new register.
1123 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1124 MachineInstr *MI, unsigned NewVReg,
1126 // There is an implicit use. That means one of the other operand is
1127 // being remat'ed and the remat'ed instruction has li.reg as an
1128 // use operand. Make sure we rewrite that as well.
1129 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1130 MachineOperand &MO = MI->getOperand(i);
1133 unsigned Reg = MO.getReg();
// Only rematerialized virtual registers are of interest here.
1134 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1136 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to NewVReg.
1138 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1139 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1141 UseMO->setReg(NewVReg);
1145 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1146 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// rewriteInstructionForSpills rewrites a single instruction MI that touches
/// the spilled interval 'li': it may delete a fully-rematerialized def, fold
/// the spill / restore into MI, or rewrite li.reg operands to a (possibly
/// new) virtual register NewVReg and grow the new spill interval to cover
/// this def / use. Returns whether all the rewritten operands were foldable.
1147 bool LiveIntervals::
1148 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1149 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1150 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1151 unsigned Slot, int LdSlot,
1152 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1154 const TargetRegisterClass* rc,
1155 SmallVector<int, 4> &ReMatIds,
1156 const MachineLoopInfo *loopInfo,
1157 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1158 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1159 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1160 MachineBasicBlock *MBB = MI->getParent();
1161 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1162 bool CanFold = false;
1164 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1165 MachineOperand& mop = MI->getOperand(i);
1168 unsigned Reg = mop.getReg();
1169 unsigned RegI = Reg;
1170 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1175 bool TryFold = !DefIsReMat;
1176 bool FoldSS = true; // Default behavior unless it's a remat.
1177 int FoldSlot = Slot;
1179 // If this is the rematerializable definition MI itself and
1180 // all of its uses are rematerialized, simply delete it.
1181 if (MI == ReMatOrigDefMI && CanDelete) {
1182 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1184 RemoveMachineInstrFromMaps(MI);
1185 vrm.RemoveMachineInstrFromMaps(MI);
1186 MI->eraseFromParent();
1190 // If def for this use can't be rematerialized, then try folding.
1191 // If def is rematerializable and it's a load, also try folding.
1192 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1194 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1200 // Scan all of the operands of this instruction rewriting operands
1201 // to use NewVReg instead of li.reg as appropriate. We do this for
1204 // 1. If the instr reads the same spilled vreg multiple times, we
1205 // want to reuse the NewVReg.
1206 // 2. If the instr is a two-addr instruction, we are required to
1207 // keep the src/dst regs pinned.
1209 // Keep track of whether we replace a use and/or def so that we can
1210 // create the spill interval with the appropriate range.
1212 HasUse = mop.isUse();
1213 HasDef = mop.isDef();
1214 SmallVector<unsigned, 2> Ops;
// Collect every other operand of MI that also refers to li.reg so they can
// all be rewritten to the same NewVReg (and considered for folding).
1216 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1217 const MachineOperand &MOj = MI->getOperand(j);
1220 unsigned RegJ = MOj.getReg();
1221 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1225 HasUse |= MOj.isUse();
1226 HasDef |= MOj.isDef();
1230 if (HasUse && !li.liveAt(getUseIndex(index)))
1231 // Must be defined by an implicit def. It should not be spilled. Note,
1232 // this is for correctness reason. e.g.
1233 // 8 %reg1024<def> = IMPLICIT_DEF
1234 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1235 // The live range [12, 14) are not part of the r1024 live interval since
1236 // it's defined by an implicit def. It will not conflict with the live
1237 // interval of r1025. Now suppose both registers are spilled, you can
1238 // easily see a situation where both registers are reloaded before
1239 // the INSERT_SUBREG and both target registers that would overlap.
1242 // Update stack slot spill weight if we are splitting.
1243 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1247 // Create a new virtual register for the spill interval.
1248 // Create the new register now so we can map the fold instruction
1249 // to the new register so when it is unfolded we get the correct
1251 bool CreatedNewVReg = false;
1253 NewVReg = mri_->createVirtualRegister(rc);
1255 CreatedNewVReg = true;
1261 // Do not fold load / store here if we are splitting. We'll find an
1262 // optimal point to insert a load / store later.
1264 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1265 Ops, FoldSS, FoldSlot, NewVReg)) {
1266 // Folding the load/store can completely change the instruction in
1267 // unpredictable ways, rescan it from the beginning.
1270 // We need to give the new vreg the same stack slot as the
1271 // spilled interval.
1272 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// The fold may have deleted MI outright; nothing further to rewrite.
1278 if (isRemoved(MI)) {
1282 goto RestartInstruction;
1285 // We'll try to fold it later if it's profitable.
1286 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, and any implicit remat'ed uses hanging off it.
1290 mop.setReg(NewVReg);
1291 if (mop.isImplicit())
1292 rewriteImplicitOps(li, MI, NewVReg, vrm);
1294 // Reuse NewVReg for other reads.
1295 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1296 MachineOperand &mopj = MI->getOperand(Ops[j]);
1297 mopj.setReg(NewVReg);
1298 if (mopj.isImplicit())
1299 rewriteImplicitOps(li, MI, NewVReg, vrm);
1302 if (CreatedNewVReg) {
1304 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1305 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1306 // Each valnum may have its own remat id.
1307 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1309 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1311 if (!CanDelete || (HasUse && HasDef)) {
1312 // If this is a two-addr instruction then its use operands are
1313 // rematerializable but its def is not. It should be assigned a
1315 vrm.assignVirt2StackSlot(NewVReg, Slot);
1318 vrm.assignVirt2StackSlot(NewVReg, Slot);
1320 } else if (HasUse && HasDef &&
1321 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1322 // If this interval hasn't been assigned a stack slot (because earlier
1323 // def is a deleted remat def), do it now.
1324 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1325 vrm.assignVirt2StackSlot(NewVReg, Slot);
1328 // Re-matting an instruction with virtual register use. Add the
1329 // register as an implicit use on the use MI.
1330 if (DefIsReMat && ImpUse)
1331 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1333 // create a new register interval for this spill / remat.
1334 LiveInterval &nI = getOrCreateInterval(NewVReg);
1335 if (CreatedNewVReg) {
1336 NewLIs.push_back(&nI);
1337 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1339 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a range covering this use (reload to use) ...
1343 if (CreatedNewVReg) {
1344 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1345 nI.getNextValue(~0U, 0, VNInfoAllocator));
1349 // Extend the split live interval to this def / use.
1350 unsigned End = getUseIndex(index)+1;
1351 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1352 nI.getValNumInfo(nI.getNumValNums()-1));
// ... and / or covering this def (def to store).
1358 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1359 nI.getNextValue(~0U, 0, VNInfoAllocator));
1364 DOUT << "\t\t\t\tAdded new interval: ";
1365 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if val# VNI has a kill inside MBB at
/// an instruction index strictly greater than Idx (and before the block's
/// end index).
1370 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1372 MachineBasicBlock *MBB, unsigned Idx) const {
1373 unsigned End = getMBBEndIdx(MBB);
1374 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1375 unsigned KillIdx = VNI->kills[j];
// Kill indices outside (Idx, End) belong to other blocks or precede Idx.
1376 if (KillIdx > Idx && KillIdx < End)
1382 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1383 /// during spilling.
1385 struct RewriteInfo {
// Constructor: i is the instruction index, mi the instruction, u / d
// whether it reads / writes the spilled register. (The member declarations
// themselves are on lines elided from this listing.)
1390 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1391 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
/// RewriteInfoCompare - Strict weak ordering by instruction index, used to
/// sort RewriteInfo entries before rewriting.
1394 struct RewriteInfoCompare {
1395 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1396 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg within
/// the live range *I: collects and sorts the affected instructions, rewrites
/// each via rewriteInstructionForSpills, and records per-MBB spill and
/// restore points (SpillMBBs / SpillIdxes, RestoreMBBs / RestoreIdxes) for
/// the spiller, updating spill weights as it goes.
1401 void LiveIntervals::
1402 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1403 LiveInterval::Ranges::const_iterator &I,
1404 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1405 unsigned Slot, int LdSlot,
1406 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1408 const TargetRegisterClass* rc,
1409 SmallVector<int, 4> &ReMatIds,
1410 const MachineLoopInfo *loopInfo,
1411 BitVector &SpillMBBs,
1412 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1413 BitVector &RestoreMBBs,
1414 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1415 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1416 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1417 bool AllCanFold = true;
1418 unsigned NewVReg = 0;
1419 unsigned start = getBaseIndex(I->start);
1420 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1422 // First collect all the def / use in this live range that will be rewritten.
1423 // Make sure they are sorted according to instruction index.
1424 std::vector<RewriteInfo> RewriteMIs;
1425 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1426 re = mri_->reg_end(); ri != re; ) {
1427 MachineInstr *MI = &*ri;
1428 MachineOperand &O = ri.getOperand();
1430 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1431 unsigned index = getInstructionIndex(MI);
// Only instructions inside this live range are rewritten here.
1432 if (index < start || index >= end)
1434 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1435 // Must be defined by an implicit def. It should not be spilled. Note,
1436 // this is for correctness reason. e.g.
1437 // 8 %reg1024<def> = IMPLICIT_DEF
1438 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1439 // The live range [12, 14) are not part of the r1024 live interval since
1440 // it's defined by an implicit def. It will not conflict with the live
1441 // interval of r1025. Now suppose both registers are spilled, you can
1442 // easily see a situation where both registers are reloaded before
1443 // the INSERT_SUBREG and both target registers that would overlap.
1445 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1447 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1449 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1450 // Now rewrite the defs and uses.
1451 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1452 RewriteInfo &rwi = RewriteMIs[i];
1454 unsigned index = rwi.Index;
1455 bool MIHasUse = rwi.HasUse;
1456 bool MIHasDef = rwi.HasDef;
1457 MachineInstr *MI = rwi.MI;
1458 // If MI defs and/or uses the same register multiple times, then there
1459 // are multiple entries.
1460 unsigned NumUses = MIHasUse;
// Coalesce all RewriteInfo entries for the same instruction.
1461 while (i != e && RewriteMIs[i].MI == MI) {
1462 assert(RewriteMIs[i].Index == index);
1463 bool isUse = RewriteMIs[i].HasUse;
1464 if (isUse) ++NumUses;
1466 MIHasDef |= RewriteMIs[i].HasDef;
1469 MachineBasicBlock *MBB = MI->getParent();
1471 if (ImpUse && MI != ReMatDefMI) {
1472 // Re-matting an instruction with virtual register use. Update the
1473 // register interval's spill weight to HUGE_VALF to prevent it from
1475 LiveInterval &ImpLi = getInterval(ImpUse);
1476 ImpLi.weight = HUGE_VALF;
1479 unsigned MBBId = MBB->getNumber();
1480 unsigned ThisVReg = 0;
// When splitting, reuse the vreg already created for this MBB, if any.
1482 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1483 if (NVI != MBBVRegsMap.end()) {
1484 ThisVReg = NVI->second;
1491 // It's better to start a new interval to avoid artificially
1492 // extending the new interval.
1493 if (MIHasDef && !MIHasUse) {
1494 MBBVRegsMap.erase(MBB->getNumber());
1500 bool IsNew = ThisVReg == 0;
1502 // This ends the previous live interval. If all of its def / use
1503 // can be folded, give it a low spill weight.
1504 if (NewVReg && TrySplit && AllCanFold) {
1505 LiveInterval &nI = getOrCreateInterval(NewVReg);
1512 bool HasDef = false;
1513 bool HasUse = false;
1514 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1515 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1516 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1517 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1518 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight)
1519 if (!HasDef && !HasUse)
1522 AllCanFold &= CanFold;
1524 // Update weight of spill interval.
1525 LiveInterval &nI = getOrCreateInterval(NewVReg);
1527 // The spill weight is now infinity as it cannot be spilled again.
1528 nI.weight = HUGE_VALF;
1532 // Keep track of the last def and first use in each MBB.
1534 if (MI != ReMatOrigDefMI || !CanDelete) {
1535 bool HasKill = false;
1537 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1539 // If this is a two-address code, then this index starts a new VNInfo.
1540 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1542 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1544 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1545 SpillIdxes.find(MBBId);
// Record (or adjust) a spill point for this MBB.
1547 if (SII == SpillIdxes.end()) {
1548 std::vector<SRInfo> S;
1549 S.push_back(SRInfo(index, NewVReg, true));
1550 SpillIdxes.insert(std::make_pair(MBBId, S));
1551 } else if (SII->second.back().vreg != NewVReg) {
1552 SII->second.push_back(SRInfo(index, NewVReg, true));
1553 } else if ((int)index > SII->second.back().index) {
1554 // If there is an earlier def and this is a two-address
1555 // instruction, then it's not possible to fold the store (which
1556 // would also fold the load).
1557 SRInfo &Info = SII->second.back();
1559 Info.canFold = !HasUse;
1561 SpillMBBs.set(MBBId);
1562 } else if (SII != SpillIdxes.end() &&
1563 SII->second.back().vreg == NewVReg &&
1564 (int)index > SII->second.back().index) {
1565 // There is an earlier def that's not killed (must be two-address).
1566 // The spill is no longer needed.
1567 SII->second.pop_back();
1568 if (SII->second.empty()) {
1569 SpillIdxes.erase(MBBId);
1570 SpillMBBs.reset(MBBId);
1577 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1578 SpillIdxes.find(MBBId);
1579 if (SII != SpillIdxes.end() &&
1580 SII->second.back().vreg == NewVReg &&
1581 (int)index > SII->second.back().index)
1582 // Use(s) following the last def, it's not safe to fold the spill.
1583 SII->second.back().canFold = false;
1584 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1585 RestoreIdxes.find(MBBId);
1586 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1587 // If we are splitting live intervals, only fold if it's the first
1588 // use and there isn't another use later in the MBB.
1589 RII->second.back().canFold = false;
1591 // Only need a reload if there isn't an earlier def / use.
1592 if (RII == RestoreIdxes.end()) {
1593 std::vector<SRInfo> Infos;
1594 Infos.push_back(SRInfo(index, NewVReg, true));
1595 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1597 RII->second.push_back(SRInfo(index, NewVReg, true));
1599 RestoreMBBs.set(MBBId);
1603 // Update spill weight.
1604 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1605 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1608 if (NewVReg && TrySplit && AllCanFold) {
1609 // If all of its def / use can be folded, give it a low spill weight.
1610 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if basic block 'Id' has a foldable restore
/// recorded for virtual register 'vr' at instruction index 'index'.
1615 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1616 BitVector &RestoreMBBs,
1617 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1618 if (!RestoreMBBs[Id])
1620 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1621 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1622 if (Restores[i].index == index &&
1623 Restores[i].vreg == vr &&
1624 Restores[i].canFold)
1629 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1630 BitVector &RestoreMBBs,
1631 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1632 if (!RestoreMBBs[Id])
1634 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1635 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1636 if (Restores[i].index == index && Restores[i].vreg)
1637 Restores[i].index = -1;
1640 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1641 /// spilled and create empty intervals for their uses.
1643 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1644 const TargetRegisterClass* rc,
1645 std::vector<LiveInterval*> &NewLIs) {
1646 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1647 re = mri_->reg_end(); ri != re; ) {
1648 MachineOperand &O = ri.getOperand();
1649 MachineInstr *MI = &*ri;
// Any remaining def of li.reg must be an IMPLICIT_DEF; delete it and
// drop it from the index maps.
1652 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1653 "Register def was not rewritten?");
1654 RemoveMachineInstrFromMaps(MI);
1655 vrm.RemoveMachineInstrFromMaps(MI);
1656 MI->eraseFromParent();
1658 // This must be an use of an implicit_def so it's not part of the live
1659 // interval. Create a new empty live interval for it.
1660 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1661 unsigned NewVReg = mri_->createVirtualRegister(rc);
1663 vrm.setIsImplicitlyDefined(NewVReg);
1664 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of MI that still refers to li.reg.
1665 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1666 MachineOperand &MO = MI->getOperand(i);
1667 if (MO.isReg() && MO.getReg() == li.reg)
// Orders live intervals by ascending start index; used to return newly
// created spill intervals to clients in sorted order.
1676 bool operator()(LiveInterval* A, LiveInterval* B) {
1677 return A->beginNumber() < B->beginNumber();
/// addIntervalsForSpillsFast - Simple "spill everywhere" fallback (enabled
/// by -fast-spill): assigns li one stack slot, then for every instruction
/// touching li.reg either folds the slot into the instruction or creates a
/// fresh single-use vreg/interval with reload and/or spill points around it.
/// Returns the new intervals sorted by start index.
1682 std::vector<LiveInterval*> LiveIntervals::
1683 addIntervalsForSpillsFast(const LiveInterval &li,
1684 const MachineLoopInfo *loopInfo,
1685 VirtRegMap &vrm, float& SSWeight) {
1686 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1688 std::vector<LiveInterval*> added;
1690 assert(li.weight != HUGE_VALF &&
1691 "attempt to spill already spilled interval!");
1693 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1697 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1701 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1702 while (RI != mri_->reg_end()) {
1703 MachineInstr* MI = &*RI;
// Gather every operand of MI that refers to li.reg.
1705 SmallVector<unsigned, 2> Indices;
1706 bool HasUse = false;
1707 bool HasDef = false;
1709 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1710 MachineOperand& mop = MI->getOperand(i);
1711 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1713 HasUse |= MI->getOperand(i).isUse();
1714 HasDef |= MI->getOperand(i).isDef();
1716 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; otherwise reload /
// spill around the instruction through a fresh vreg.
1719 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1720 Indices, true, slot, li.reg)) {
1721 unsigned NewVReg = mri_->createVirtualRegister(rc);
1723 vrm.assignVirt2StackSlot(NewVReg, slot);
1725 // create a new register for this spill
1726 LiveInterval &nI = getOrCreateInterval(NewVReg);
1728 // the spill weight is now infinity as it
1729 // cannot be spilled again
1730 nI.weight = HUGE_VALF;
1732 // Rewrite register operands to use the new vreg.
1733 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1734 E = Indices.end(); I != E; ++I) {
1735 MI->getOperand(*I).setReg(NewVReg);
// The new vreg only lives across this instruction, so a use is a kill.
1737 if (MI->getOperand(*I).isUse())
1738 MI->getOperand(*I).setIsKill(true);
1741 // Fill in the new live interval.
1742 unsigned index = getInstructionIndex(MI);
// A use needs a range from the reload to the use, plus a restore point.
1744 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1745 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1748 vrm.addRestorePoint(NewVReg, MI);
// A def needs a range from the def to the store, plus a spill point.
1751 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1752 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1755 vrm.addSpillPoint(NewVReg, true, MI);
1758 added.push_back(&nI);
1760 DOUT << "\t\t\t\tadded new interval: ";
// Account for the reload / spill traffic in the stack slot weight.
1764 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1767 SSWeight += getSpillWeight(true, true, loopDepth);
1769 SSWeight += getSpillWeight(false, true, loopDepth);
1771 SSWeight += getSpillWeight(true, false, loopDepth);
// Folding / rewriting may have invalidated the iterator; restart the scan.
1775 RI = mri_->reg_begin(li.reg);
1778 // Clients expect the new intervals to be returned in sorted order.
1779 std::sort(added.begin(), added.end(), LISorter());
1784 std::vector<LiveInterval*> LiveIntervals::
1785 addIntervalsForSpills(const LiveInterval &li,
1786 SmallVectorImpl<LiveInterval*> &SpillIs,
1787 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1790 if (EnableFastSpilling)
1791 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1793 assert(li.weight != HUGE_VALF &&
1794 "attempt to spill already spilled interval!");
1796 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1797 li.print(DOUT, tri_);
1800 // Spill slot weight.
1803 // Each bit specify whether a spill is required in the MBB.
1804 BitVector SpillMBBs(mf_->getNumBlockIDs());
1805 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1806 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1807 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1808 DenseMap<unsigned,unsigned> MBBVRegsMap;
1809 std::vector<LiveInterval*> NewLIs;
1810 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1812 unsigned NumValNums = li.getNumValNums();
1813 SmallVector<MachineInstr*, 4> ReMatDefs;
1814 ReMatDefs.resize(NumValNums, NULL);
1815 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1816 ReMatOrigDefs.resize(NumValNums, NULL);
1817 SmallVector<int, 4> ReMatIds;
1818 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1819 BitVector ReMatDelete(NumValNums);
1820 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1822 // Spilling a split live interval. It cannot be split any further. Also,
1823 // it's also guaranteed to be a single val# / range interval.
1824 if (vrm.getPreSplitReg(li.reg)) {
1825 vrm.setIsSplitFromReg(li.reg, 0);
1826 // Unset the split kill marker on the last use.
1827 unsigned KillIdx = vrm.getKillPoint(li.reg);
1829 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1830 assert(KillMI && "Last use disappeared?");
1831 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1832 assert(KillOp != -1 && "Last use disappeared?");
1833 KillMI->getOperand(KillOp).setIsKill(false);
1835 vrm.removeKillPoint(li.reg);
1836 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1837 Slot = vrm.getStackSlot(li.reg);
1838 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1839 MachineInstr *ReMatDefMI = DefIsReMat ?
1840 vrm.getReMaterializedMI(li.reg) : NULL;
1842 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1843 bool isLoad = isLoadSS ||
1844 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1845 bool IsFirstRange = true;
1846 for (LiveInterval::Ranges::const_iterator
1847 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1848 // If this is a split live interval with multiple ranges, it means there
1849 // are two-address instructions that re-defined the value. Only the
1850 // first def can be rematerialized!
1852 // Note ReMatOrigDefMI has already been deleted.
1853 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1854 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1855 false, vrm, rc, ReMatIds, loopInfo,
1856 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1857 MBBVRegsMap, NewLIs, SSWeight);
1859 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1860 Slot, 0, false, false, false,
1861 false, vrm, rc, ReMatIds, loopInfo,
1862 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1863 MBBVRegsMap, NewLIs, SSWeight);
1865 IsFirstRange = false;
1868 SSWeight = 0.0f; // Already accounted for when split.
1869 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1873 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1874 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1878 bool NeedStackSlot = false;
1879 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1881 const VNInfo *VNI = *i;
1882 unsigned VN = VNI->id;
1883 unsigned DefIdx = VNI->def;
1885 continue; // Dead val#.
1886 // Is the def for the val# rematerializable?
1887 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1888 ? 0 : getInstructionFromIndex(DefIdx);
1890 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1891 // Remember how to remat the def of this val#.
1892 ReMatOrigDefs[VN] = ReMatDefMI;
1893 // Original def may be modified so we have to make a copy here.
1894 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1895 ClonedMIs.push_back(Clone);
1896 ReMatDefs[VN] = Clone;
1898 bool CanDelete = true;
1899 if (VNI->hasPHIKill) {
1900 // A kill is a phi node, not all of its uses can be rematerialized.
1901 // It must not be deleted.
1903 // Need a stack slot if there is any live range where uses cannot be
1905 NeedStackSlot = true;
1908 ReMatDelete.set(VN);
1910 // Need a stack slot if there is any live range where uses cannot be
1912 NeedStackSlot = true;
1916 // One stack slot per live interval.
1917 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1918 Slot = vrm.assignVirt2StackSlot(li.reg);
1920 // Create new intervals and rewrite defs and uses.
1921 for (LiveInterval::Ranges::const_iterator
1922 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1923 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1924 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1925 bool DefIsReMat = ReMatDefMI != NULL;
1926 bool CanDelete = ReMatDelete[I->valno->id];
1928 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1929 bool isLoad = isLoadSS ||
1930 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1931 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1932 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1933 CanDelete, vrm, rc, ReMatIds, loopInfo,
1934 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1935 MBBVRegsMap, NewLIs, SSWeight);
1938 // Insert spills / restores if we are splitting.
1940 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1944 SmallPtrSet<LiveInterval*, 4> AddedKill;
1945 SmallVector<unsigned, 2> Ops;
1946 if (NeedStackSlot) {
1947 int Id = SpillMBBs.find_first();
1949 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1950 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1951 std::vector<SRInfo> &spills = SpillIdxes[Id];
1952 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1953 int index = spills[i].index;
1954 unsigned VReg = spills[i].vreg;
1955 LiveInterval &nI = getOrCreateInterval(VReg);
1956 bool isReMat = vrm.isReMaterialized(VReg);
1957 MachineInstr *MI = getInstructionFromIndex(index);
1958 bool CanFold = false;
1959 bool FoundUse = false;
1961 if (spills[i].canFold) {
1963 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1964 MachineOperand &MO = MI->getOperand(j);
1965 if (!MO.isReg() || MO.getReg() != VReg)
1972 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1973 RestoreMBBs, RestoreIdxes))) {
1974 // MI has two-address uses of the same register. If the use
1975 // isn't the first and only use in the BB, then we can't fold
1976 // it. FIXME: Move this to rewriteInstructionsForSpills.
1983 // Fold the store into the def if possible.
1984 bool Folded = false;
1985 if (CanFold && !Ops.empty()) {
1986 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1989 // Also folded uses, do not issue a load.
1990 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1991 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1993 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1997 // Otherwise tell the spiller to issue a spill.
1999 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2000 bool isKill = LR->end == getStoreIndex(index);
2001 if (!MI->registerDefIsDead(nI.reg))
2002 // No need to spill a dead def.
2003 vrm.addSpillPoint(VReg, isKill, MI);
2005 AddedKill.insert(&nI);
2008 // Update spill slot weight.
2010 SSWeight += getSpillWeight(true, false, loopDepth);
2012 Id = SpillMBBs.find_next(Id);
2016 int Id = RestoreMBBs.find_first();
2018 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2019 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2021 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2022 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2023 int index = restores[i].index;
2026 unsigned VReg = restores[i].vreg;
2027 LiveInterval &nI = getOrCreateInterval(VReg);
2028 bool isReMat = vrm.isReMaterialized(VReg);
2029 MachineInstr *MI = getInstructionFromIndex(index);
2030 bool CanFold = false;
2032 if (restores[i].canFold) {
2034 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2035 MachineOperand &MO = MI->getOperand(j);
2036 if (!MO.isReg() || MO.getReg() != VReg)
2040 // If this restore were to be folded, it would have been folded
2049 // Fold the load into the use if possible.
2050 bool Folded = false;
2051 if (CanFold && !Ops.empty()) {
2053 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2055 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2057 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2058 // If the rematerializable def is a load, also try to fold it.
2059 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2060 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2061 Ops, isLoadSS, LdSlot, VReg);
2062 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2064 // Re-matting an instruction with virtual register use. Add the
2065 // register as an implicit use on the use MI and update the register
2066 // interval's spill weight to HUGE_VALF to prevent it from being
2068 LiveInterval &ImpLi = getInterval(ImpUse);
2069 ImpLi.weight = HUGE_VALF;
2070 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2074 // If folding is not possible / failed, then tell the spiller to issue a
2075 // load / rematerialization for us.
2077 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2079 vrm.addRestorePoint(VReg, MI);
2081 // Update spill slot weight.
2083 SSWeight += getSpillWeight(false, true, loopDepth);
2085 Id = RestoreMBBs.find_next(Id);
2088 // Finalize intervals: add kills, finalize spill weights, and filter out
2090 std::vector<LiveInterval*> RetNewLIs;
2091 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2092 LiveInterval *LI = NewLIs[i];
2094 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2095 if (!AddedKill.count(LI)) {
2096 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2097 unsigned LastUseIdx = getBaseIndex(LR->end);
2098 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2099 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2100 assert(UseIdx != -1);
2101 if (LastUse->getOperand(UseIdx).isImplicit() ||
2102 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2103 LastUse->getOperand(UseIdx).setIsKill();
2104 vrm.addKillPoint(LI->reg, LastUseIdx);
2107 RetNewLIs.push_back(LI);
2111 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2115 /// hasAllocatableSuperReg - Return true if the specified physical register has
2116 /// any super register that's allocatable.
2117 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2118 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2119 if (allocatableRegs_[*AS] && hasInterval(*AS))
2124 /// getRepresentativeReg - Find the largest super register of the specified
2125 /// physical register.
2126 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2127 // Find the largest super-register that is allocatable.
2128 unsigned BestReg = Reg;
2129 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2130 unsigned SuperReg = *AS;
2131 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2139 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2140 /// specified interval that conflicts with the specified physical register.
2141 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2142 unsigned PhysReg) const {
2143 unsigned NumConflicts = 0;
2144 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2145 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2146 E = mri_->reg_end(); I != E; ++I) {
2147 MachineOperand &O = I.getOperand();
2148 MachineInstr *MI = O.getParent();
2149 unsigned Index = getInstructionIndex(MI);
2150 if (pli.liveAt(Index))
2153 return NumConflicts;
2156 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2157 /// around all defs and uses of the specified interval.
2158 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2159 unsigned PhysReg, VirtRegMap &vrm) {
2160 unsigned SpillReg = getRepresentativeReg(PhysReg);
2162 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2163 // If there are registers which alias PhysReg, but which are not a
2164 // sub-register of the chosen representative super register. Assert
2165 // since we can't handle it yet.
2166 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2167 tri_->isSuperRegister(*AS, SpillReg));
2169 LiveInterval &pli = getInterval(SpillReg);
2170 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2171 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2172 E = mri_->reg_end(); I != E; ++I) {
2173 MachineOperand &O = I.getOperand();
2174 MachineInstr *MI = O.getParent();
2175 if (SeenMIs.count(MI))
2178 unsigned Index = getInstructionIndex(MI);
2179 if (pli.liveAt(Index)) {
2180 vrm.addEmergencySpill(SpillReg, MI);
2181 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2182 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2183 if (!hasInterval(*AS))
2185 LiveInterval &spli = getInterval(*AS);
2186 if (spli.liveAt(Index))
2187 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2193 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2194 MachineInstr* startInst) {
2195 LiveInterval& Interval = getOrCreateInterval(reg);
2196 VNInfo* VN = Interval.getNextValue(
2197 getInstructionIndex(startInst) + InstrSlots::DEF,
2198 startInst, getVNInfoAllocator());
2199 VN->hasPHIKill = true;
2200 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2201 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2202 getMBBEndIdx(startInst->getParent()) + 1, VN);
2203 Interval.addRange(LR);