//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>   // std::sort, std::lower_bound (used below)
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
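
// Illustrative note (not part of the original source): a client pass, for
// example a register allocator, would typically request this analysis along
// the following lines, where "MyRegAlloc" is a hypothetical
// MachineFunctionPass used only for illustration:
//
//   void MyRegAlloc::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   bool MyRegAlloc::runOnMachineFunction(MachineFunction &MF) {
//     LiveIntervals &LIs = getAnalysis<LiveIntervals>();
//     // ... iterate LIs.begin()/LIs.end() or query individual intervals ...
//     return true;
//   }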
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
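
  // Note on the index arithmetic used above and below: every instruction is
  // allotted InstrSlots::NUM consecutive indices, one per slot. A minimal
  // sketch of the layout assumed throughout this file (the authoritative
  // definition lives in LiveIntervalAnalysis.h):
  //
  //   enum InstrSlots { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };
  //
  // getBaseIndex() strips the slot offset from an index, while getLoadIndex(),
  // getUseIndex(), getDefIndex() and getStoreIndex() add the corresponding
  // offset back onto an instruction's base index.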
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I - 1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I - 1) : I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i] - 1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
/// runOnMachineFunction - Register allocate the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg() || !mop.isDef())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef, except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0])) + 1;
      else
        killIdx = defIndex + 1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
    }
  }
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx, MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should see a dead physreg here without a killing
  // or redefining instruction is if it is live-in to the function and never
  // used.
  assert(!CopyMI && "physreg was not killed in defining block!");

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
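  // Note: the def index of ~0U used for this value number marks a value with
  // no defining instruction inside the function (a live-in, or a value
  // introduced by PHI elimination); computeNumbering() treats such sentinel
  // defs specially and never remaps them.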
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
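///
/// For example, under the slot numbering described above (assuming
/// InstrSlots::NUM == 4 with USE == 1 and DEF == 2), a virtual register
/// defined by the instruction at base index 4 and last read by the
/// instruction at base index 16 gets the interval
/// [getDefIndex(4), getUseIndex(16) + 1) == [6, 18).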
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
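
// Note: physical-register intervals are created with an infinite weight so
// that the spill heuristics never try to spill them; virtual-register
// intervals start at zero and accumulate weight from their uses (see the
// getSpillWeight calls below). A weight of HUGE_VALF is also used later to
// mean "already spilled, do not spill again".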
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
//===----------------------------------------------------------------------===//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = Reg;
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests instead.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (!ImpUse)
        ImpUse = Reg;
      else if (Reg != ImpUse)
        return false;

      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
      FoldOps.push_back(OpIdx);
    }
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
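    // The folded instruction fmi has taken over MI's slot in the index maps
    // updated above; the line below swaps it into the basic block in place of
    // MI, and callers observe the new instruction through the by-reference MI
    // parameter.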
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      return false;
    if (I->end > II->first)  // Crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;

 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
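    // getSpillWeight scales the def/use count exponentially by loop depth; a
    // minimal sketch of the intended computation (the exact formula lives in
    // the header and is assumed here, not quoted from it):
    //
    //   weight = (isDef + isUse) * powf(10.0F, (float)loopDepth);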
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      CreatedNewVReg = true;
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);

          if (isRemoved(MI)) {
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        // If this is a two-address instruction, then this index starts a new
        // VNInfo.
        const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
        if (VNI)
          HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold the
        // spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;

      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
      } else {
        RII->second.push_back(SRInfo(index, NewVReg, true));
      }
      RestoreMBBs.set(MBBId);
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
1741 std::vector<LiveInterval*> LiveIntervals::
1742 addIntervalsForSpillsFast(const LiveInterval &li,
1743 const MachineLoopInfo *loopInfo,
1744 VirtRegMap &vrm, float& SSWeight) {
1745 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1747 std::vector<LiveInterval*> added;
1749 assert(li.weight != HUGE_VALF &&
1750 "attempt to spill already spilled interval!");
1752 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1756 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1760 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1761 while (RI != mri_->reg_end()) {
1762 MachineInstr* MI = &*RI;
1764 SmallVector<unsigned, 2> Indices;
1765 bool HasUse = false;
1766 bool HasDef = false;
1768 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1769 MachineOperand& mop = MI->getOperand(i);
1770 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1772 HasUse |= MI->getOperand(i).isUse();
1773 HasDef |= MI->getOperand(i).isDef();
1775 Indices.push_back(i);
1778 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1779 Indices, true, slot, li.reg)) {
1780 unsigned NewVReg = mri_->createVirtualRegister(rc);
1782 vrm.assignVirt2StackSlot(NewVReg, slot);
1784 // create a new register for this spill
1785 LiveInterval &nI = getOrCreateInterval(NewVReg);
1787 // the spill weight is now infinity as it
1788 // cannot be spilled again
1789 nI.weight = HUGE_VALF;
1791 // Rewrite register operands to use the new vreg.
1792 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1793 E = Indices.end(); I != E; ++I) {
1794 MI->getOperand(*I).setReg(NewVReg);
1796 if (MI->getOperand(*I).isUse())
1797 MI->getOperand(*I).setIsKill(true);
1800 // Fill in the new live interval.
1801 unsigned index = getInstructionIndex(MI);
1803 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1804 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1807 vrm.addRestorePoint(NewVReg, MI);
1810 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1811 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1814 vrm.addSpillPoint(NewVReg, true, MI);
1817 added.push_back(&nI);
1819 DOUT << "\t\t\t\tadded new interval: ";
1823 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
if (HasDef && HasUse)
1826 SSWeight += getSpillWeight(true, true, loopDepth);
else if (HasUse)
1828 SSWeight += getSpillWeight(false, true, loopDepth);
else
1830 SSWeight += getSpillWeight(true, false, loopDepth);
}
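// The use list of the original register has changed above (operands were
// rewritten or the memory operand was folded), so restart the walk from the
// beginning of the remaining uses.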
1834 RI = mri_->reg_begin(li.reg);
}
1837 // Clients expect the new intervals to be returned in sorted order.
1838 std::sort(added.begin(), added.end(), LISorter());
return added;
}
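
/// addIntervalsForSpills - Create new live intervals for the spilled uses and
/// defs of the given interval. Defs are rematerialized where possible, the
/// interval may be split at basic block boundaries, and the chosen spill /
/// restore points are recorded in the VirtRegMap for the spiller.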
1843 std::vector<LiveInterval*> LiveIntervals::
1844 addIntervalsForSpills(const LiveInterval &li,
1845 SmallVectorImpl<LiveInterval*> &SpillIs,
1846 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
float &SSWeight) {
1849 if (EnableFastSpilling)
1850 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1852 assert(li.weight != HUGE_VALF &&
1853 "attempt to spill already spilled interval!");
1855 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1856 li.print(DOUT, tri_);
1859 // Spill slot weight.
SSWeight = 0.0f;
1862 // Each bit specifies whether a spill is required in the MBB.
1863 BitVector SpillMBBs(mf_->getNumBlockIDs());
1864 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1865 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1866 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1867 DenseMap<unsigned,unsigned> MBBVRegsMap;
1868 std::vector<LiveInterval*> NewLIs;
1869 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
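// Per-val# rematerialization state: the (possibly cloned) def instruction
// used to re-emit the value, the original def, the remat id assigned during
// rewriting, and the set of val#s whose original defs can be deleted.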
1871 unsigned NumValNums = li.getNumValNums();
1872 SmallVector<MachineInstr*, 4> ReMatDefs;
1873 ReMatDefs.resize(NumValNums, NULL);
1874 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1875 ReMatOrigDefs.resize(NumValNums, NULL);
1876 SmallVector<int, 4> ReMatIds;
1877 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1878 BitVector ReMatDelete(NumValNums);
1879 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
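// MAX_STACK_SLOT also serves as the "no stack slot assigned yet" marker; a
// real slot is only allocated below if some use cannot be rematerialized.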
1881 // Spilling a split live interval. It cannot be split any further. Also,
1882 // it's guaranteed to be a single val# / range interval.
1883 if (vrm.getPreSplitReg(li.reg)) {
1884 vrm.setIsSplitFromReg(li.reg, 0);
1885 // Unset the split kill marker on the last use.
1886 unsigned KillIdx = vrm.getKillPoint(li.reg);
if (KillIdx) {
1888 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1889 assert(KillMI && "Last use disappeared?");
1890 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1891 assert(KillOp != -1 && "Last use disappeared?");
1892 KillMI->getOperand(KillOp).setIsKill(false);
}
1894 vrm.removeKillPoint(li.reg);
1895 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1896 Slot = vrm.getStackSlot(li.reg);
1897 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1898 MachineInstr *ReMatDefMI = DefIsReMat ?
1899 vrm.getReMaterializedMI(li.reg) : NULL;
int LdSlot = 0;
1901 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1902 bool isLoad = isLoadSS ||
1903 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1904 bool IsFirstRange = true;
1905 for (LiveInterval::Ranges::const_iterator
1906 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1907 // If this is a split live interval with multiple ranges, it means there
1908 // are two-address instructions that re-defined the value. Only the
1909 // first def can be rematerialized!
if (IsFirstRange) {
1911 // Note ReMatOrigDefMI has already been deleted.
1912 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1913 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1914 false, vrm, rc, ReMatIds, loopInfo,
1915 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1916 MBBVRegsMap, NewLIs, SSWeight);
} else {
1918 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1919 Slot, 0, false, false, false,
1920 false, vrm, rc, ReMatIds, loopInfo,
1921 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1922 MBBVRegsMap, NewLIs, SSWeight);
}
1924 IsFirstRange = false;
}
1927 SSWeight = 0.0f; // Already accounted for when split.
1928 handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
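
// Otherwise this is an original (non pre-split) interval: decide whether to
// split it at basic block boundaries, subject to the split limit option.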
1932 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1933 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
TrySplit = false;
1937 bool NeedStackSlot = false;
1938 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
1940 const VNInfo *VNI = *i;
1941 unsigned VN = VNI->id;
1942 unsigned DefIdx = VNI->def;
if (DefIdx == ~1U)
1944 continue; // Dead val#.
1945 // Is the def for the val# rematerializable?
1946 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1947 ? 0 : getInstructionFromIndex(DefIdx);
bool dummy;
1949 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1950 // Remember how to remat the def of this val#.
1951 ReMatOrigDefs[VN] = ReMatDefMI;
1952 // Original def may be modified so we have to make a copy here.
1953 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1954 ClonedMIs.push_back(Clone);
1955 ReMatDefs[VN] = Clone;
1957 bool CanDelete = true;
1958 if (VNI->hasPHIKill) {
1959 // A kill is a phi node; not all of its uses can be rematerialized.
1960 // It must not be deleted.
CanDelete = false;
1962 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1964 NeedStackSlot = true;
}
if (CanDelete)
1967 ReMatDelete.set(VN);
} else {
1969 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1971 NeedStackSlot = true;
}
}
1975 // One stack slot per live interval.
1976 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1977 Slot = vrm.assignVirt2StackSlot(li.reg);
1979 // Create new intervals and rewrite defs and uses.
1980 for (LiveInterval::Ranges::const_iterator
1981 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1982 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1983 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1984 bool DefIsReMat = ReMatDefMI != NULL;
1985 bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
1987 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1988 bool isLoad = isLoadSS ||
1989 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1990 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1991 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1992 CanDelete, vrm, rc, ReMatIds, loopInfo,
1993 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1994 MBBVRegsMap, NewLIs, SSWeight);
}
1997 // Insert spills / restores if we are splitting.
if (!TrySplit) {
1999 handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
2003 SmallPtrSet<LiveInterval*, 4> AddedKill;
2004 SmallVector<unsigned, 2> Ops;
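// Now walk the blocks that need spills and restores. For each recorded point,
// first try to fold the store / load into the instruction itself; only if
// folding fails is the spiller asked to insert an explicit memory access.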
2005 if (NeedStackSlot) {
2006 int Id = SpillMBBs.find_first();
while (Id != -1) {
2008 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2009 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2010 std::vector<SRInfo> &spills = SpillIdxes[Id];
2011 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2012 int index = spills[i].index;
2013 unsigned VReg = spills[i].vreg;
2014 LiveInterval &nI = getOrCreateInterval(VReg);
2015 bool isReMat = vrm.isReMaterialized(VReg);
2016 MachineInstr *MI = getInstructionFromIndex(index);
2017 bool CanFold = false;
2018 bool FoundUse = false;
2020 if (spills[i].canFold) {
2022 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2023 MachineOperand &MO = MI->getOperand(j);
2024 if (!MO.isReg() || MO.getReg() != VReg)
2031 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2032 RestoreMBBs, RestoreIdxes))) {
2033 // MI has two-address uses of the same register. If the use
2034 // isn't the first and only use in the BB, then we can't fold
2035 // it. FIXME: Move this to rewriteInstructionsForSpills.
2042 // Fold the store into the def if possible.
2043 bool Folded = false;
2044 if (CanFold && !Ops.empty()) {
2045 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
Folded = true;
if (FoundUse) {
2048 // Also folded uses, do not issue a load.
2049 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2050 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
}
2052 nI.removeRange(getDefIndex(index), getStoreIndex(index));
}
}
2056 // Otherwise tell the spiller to issue a spill.
if (!Folded) {
2058 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2059 bool isKill = LR->end == getStoreIndex(index);
2060 if (!MI->registerDefIsDead(nI.reg))
2061 // No need to spill a dead def.
2062 vrm.addSpillPoint(VReg, isKill, MI);
if (isKill)
2064 AddedKill.insert(&nI);
}
2067 // Update spill slot weight.
if (!isReMat)
2069 SSWeight += getSpillWeight(true, false, loopDepth);
}
2071 Id = SpillMBBs.find_next(Id);
}
}
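// Process the recorded restore points the same way: fold the reload or the
// rematerialized def into the using instruction when possible, otherwise ask
// the spiller to issue a load / rematerialization.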
2075 int Id = RestoreMBBs.find_first();
while (Id != -1) {
2077 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2078 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2080 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2081 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2082 int index = restores[i].index;
if (index == -1)
continue;
2085 unsigned VReg = restores[i].vreg;
2086 LiveInterval &nI = getOrCreateInterval(VReg);
2087 bool isReMat = vrm.isReMaterialized(VReg);
2088 MachineInstr *MI = getInstructionFromIndex(index);
2089 bool CanFold = false;
2091 if (restores[i].canFold) {
2093 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2094 MachineOperand &MO = MI->getOperand(j);
2095 if (!MO.isReg() || MO.getReg() != VReg)
2099 // If this restore were to be folded, it would have been folded
// already.
2108 // Fold the load into the use if possible.
2109 bool Folded = false;
2110 if (CanFold && !Ops.empty()) {
if (!vrm.isReMaterialized(VReg))
2112 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
else {
2114 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
2116 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2117 // If the rematerializable def is a load, also try to fold it.
2118 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2119 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2120 Ops, isLoadSS, LdSlot, VReg);
2122 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
2124 // Re-matting an instruction with virtual register use. Add the
2125 // register as an implicit use on the use MI and update the register
2126 // interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
2128 LiveInterval &ImpLi = getInterval(ImpUse);
2129 ImpLi.weight = HUGE_VALF;
2130 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
}
}
}
2135 // If folding is not possible / failed, then tell the spiller to issue a
2136 // load / rematerialization for us.
if (Folded)
2138 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
else
2140 vrm.addRestorePoint(VReg, MI);
2142 // Update spill slot weight.
if (!isReMat)
2144 SSWeight += getSpillWeight(false, true, loopDepth);
}
2146 Id = RestoreMBBs.find_next(Id);
}
2149 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
2151 std::vector<LiveInterval*> RetNewLIs;
2152 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2153 LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
2155 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2156 if (!AddedKill.count(LI)) {
2157 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2158 unsigned LastUseIdx = getBaseIndex(LR->end);
2159 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2160 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2161 assert(UseIdx != -1);
2162 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2163 LastUse->getOperand(UseIdx).setIsKill();
2164 vrm.addKillPoint(LI->reg, LastUseIdx);
}
}
2167 RetNewLIs.push_back(LI);
}
}
2171 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
return RetNewLIs;
}
2175 /// hasAllocatableSuperReg - Return true if the specified physical register has
2176 /// any super register that's allocatable.
2177 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2178 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2179 if (allocatableRegs_[*AS] && hasInterval(*AS))
return true;
return false;
}
2184 /// getRepresentativeReg - Find the largest super register of the specified
2185 /// physical register.
2186 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2187 // Find the largest super-register that is allocatable.
2188 unsigned BestReg = Reg;
2189 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2190 unsigned SuperReg = *AS;
2191 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
BestReg = SuperReg;
break;
}
}
return BestReg;
}
2199 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2200 /// specified interval that conflict with the specified physical register.
2201 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2202 unsigned PhysReg) const {
2203 unsigned NumConflicts = 0;
2204 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2205 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2206 E = mri_->reg_end(); I != E; ++I) {
2207 MachineOperand &O = I.getOperand();
2208 MachineInstr *MI = O.getParent();
2209 unsigned Index = getInstructionIndex(MI);
2210 if (pli.liveAt(Index))
++NumConflicts;
}
2213 return NumConflicts;
}
2216 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2217 /// around all defs and uses of the specified interval.
2218 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2219 unsigned PhysReg, VirtRegMap &vrm) {
2220 unsigned SpillReg = getRepresentativeReg(PhysReg);
2222 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2223 // Every register that aliases PhysReg must be either the representative
2224 // super register itself, unallocatable, or one of its sub-registers;
2225 // assert otherwise, since we can't handle that case yet.
2226 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2227 tri_->isSuperRegister(*AS, SpillReg));
2229 LiveInterval &pli = getInterval(SpillReg);
2230 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2231 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2232 E = mri_->reg_end(); I != E; ++I) {
2233 MachineOperand &O = I.getOperand();
2234 MachineInstr *MI = O.getParent();
2235 if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
2238 unsigned Index = getInstructionIndex(MI);
2239 if (pli.liveAt(Index)) {
2240 vrm.addEmergencySpill(SpillReg, MI);
2241 unsigned StartIdx = getLoadIndex(Index);
2242 unsigned EndIdx = getStoreIndex(Index)+1;
2243 if (pli.isInOneLiveRange(StartIdx, EndIdx))
2244 pli.removeRange(StartIdx, EndIdx);
else {
2246 cerr << "Ran out of registers during register allocation!\n";
2247 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2248 cerr << "Please check your inline asm statement for invalid "
2249 << "constraints:\n";
2250 MI->print(cerr.stream(), tm_);
2254 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2255 if (!hasInterval(*AS))
continue;
2257 LiveInterval &spli = getInterval(*AS);
2258 if (spli.liveAt(Index))
2259 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
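
/// addLiveRangeToEndOfBlock - Extend (or create) the live interval of the
/// given register with a range from startInst's def slot to the end of
/// startInst's basic block, and mark the new value as killed by a PHI.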
2265 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2266 MachineInstr* startInst) {
2267 LiveInterval& Interval = getOrCreateInterval(reg);
2268 VNInfo* VN = Interval.getNextValue(
2269 getInstructionIndex(startInst) + InstrSlots::DEF,
2270 startInst, getVNInfoAllocator());
2271 VN->hasPHIKill = true;
2272 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2273 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2274 getMBBEndIdx(startInst->getParent()) + 1, VN);
2275 Interval.addRange(LR);
return LR;
}