1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
43 // Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization entirely.
44 static cl::opt<bool> DisableReMat("disable-rematerialization",
45 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: allow splitting live intervals at basic-block
// boundaries (on by default).
47 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
48 cl::init(true), cl::Hidden);
// -split-limit: cap on the number of interval splits; -1 means no limit.
49 static cl::opt<int> SplitLimit("split-limit",
50 cl::init(-1), cl::Hidden);
// -aggressive-remat: enable the target-independent rematerialization checks
// used when the target does not mark an instruction trivially rematable.
52 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// -fast-spill: use the fast spilling path instead of the default one.
54 static cl::opt<bool> EnableFastSpilling("fast-spill",
55 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
57 STATISTIC(numIntervals, "Number of original intervals");
58 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
59 STATISTIC(numSplits , "Number of intervals split");
// Pass ID and registration under the name "liveintervals".
61 char LiveIntervals::ID = 0;
62 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
64 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
// Declare the passes this analysis needs as input and the passes it
// leaves intact, so the pass manager can schedule it correctly.
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
// Loop info and dominators are not invalidated by this pass.
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and the two-address pass must run before live-interval
// construction; PHI elimination's results remain valid afterwards.
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 MachineFunctionPass::getAnalysisUsage(AU);
// Free all per-function state so the pass can be rerun on another function.
81 void LiveIntervals::releaseMemory() {
82 // Free the live intervals themselves.
83 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
84 E = r2iMap_.end(); I != E; ++I)
92 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
93 VNInfoAllocator.Reset();
// Delete any machine instructions this pass cloned; they are owned here,
// not by any basic block.
94 while (!ClonedMIs.empty()) {
95 MachineInstr *MI = ClonedMIs.back();
97 mf_->DeleteMachineInstr(MI);
// Assign slot indices to every instruction and basic block in the function,
// then remap any pre-existing live ranges / value numbers from the old
// numbering to the new one.
101 void LiveIntervals::computeNumbering() {
// Save the old index maps so existing ranges can be translated below.
102 Index2MiMap OldI2MI = i2miMap_;
103 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
112 // Number MachineInstrs and MachineBasicBlocks.
113 // Initialize MBB indexes to a sentinel.
114 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
116 unsigned MIIndex = 0;
117 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
119 unsigned StartIdx = MIIndex;
121 // Insert an empty slot at the beginning of each block.
122 MIIndex += InstrSlots::NUM;
123 i2miMap_.push_back(0);
125 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
127 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
128 assert(inserted && "multiple MachineInstr -> index mappings");
130 i2miMap_.push_back(I);
131 MIIndex += InstrSlots::NUM;
134 // Insert max(1, numdefs) empty slots after every instruction.
135 unsigned Slots = I->getDesc().getNumDefs();
138 MIIndex += InstrSlots::NUM * Slots;
140 i2miMap_.push_back(0);
143 // Set the MBB2IdxMap entry for this MBB.
144 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
145 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep the index->MBB map sorted so lower_bound lookups work.
147 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was a previous numbering, translate every live range and
// value number into the new index space.
149 if (!OldI2MI.empty())
150 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
151 for (LiveInterval::iterator LI = OI->second->begin(),
152 LE = OI->second->end(); LI != LE; ++LI) {
154 // Remap the start index of the live range to the corresponding new
155 // number, or our best guess at what it _should_ correspond to if the
156 // original instruction has been erased. This is either the following
157 // instruction or its predecessor.
158 unsigned index = LI->start / InstrSlots::NUM;
159 unsigned offset = LI->start % InstrSlots::NUM;
160 if (offset == InstrSlots::LOAD) {
161 std::vector<IdxMBBPair>::const_iterator I =
162 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
163 // Take the pair containing the index
164 std::vector<IdxMBBPair>::const_iterator J =
165 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
167 LI->start = getMBBStartIdx(J->second);
169 LI->start = mi2iMap_[OldI2MI[index]] + offset;
172 // Remap the ending index in the same way that we remapped the start,
173 // except for the final step where we always map to the immediately
174 // following instruction.
175 index = (LI->end - 1) / InstrSlots::NUM;
176 offset = LI->end % InstrSlots::NUM;
177 if (offset == InstrSlots::LOAD) {
178 // VReg dies at end of block.
179 std::vector<IdxMBBPair>::const_iterator I =
180 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
183 LI->end = getMBBEndIdx(I->second) + 1;
185 unsigned idx = index;
// Skip over erased instructions to find the next surviving one.
186 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
188 if (index != OldI2MI.size())
189 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
191 LI->end = InstrSlots::NUM * i2miMap_.size();
195 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
196 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
199 // Remap the VNInfo def index, which works the same as the
200 // start indices above. VN's with special sentinel defs
201 // don't need to be remapped.
202 if (vni->isDefAccurate() && !vni->isUnused()) {
203 unsigned index = vni->def / InstrSlots::NUM;
204 unsigned offset = vni->def % InstrSlots::NUM;
205 if (offset == InstrSlots::LOAD) {
206 std::vector<IdxMBBPair>::const_iterator I =
207 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
208 // Take the pair containing the index
209 std::vector<IdxMBBPair>::const_iterator J =
210 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
212 vni->def = getMBBStartIdx(J->second);
214 vni->def = mi2iMap_[OldI2MI[index]] + offset;
218 // Remap the VNInfo kill indices, which works the same as
219 // the end indices above.
220 for (size_t i = 0; i < vni->kills.size(); ++i) {
221 // PHI kills don't need to be remapped.
222 if (!vni->kills[i]) continue;
224 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
225 unsigned offset = vni->kills[i] % InstrSlots::NUM;
226 if (offset == InstrSlots::LOAD) {
227 std::vector<IdxMBBPair>::const_iterator I =
228 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
231 vni->kills[i] = getMBBEndIdx(I->second);
233 unsigned idx = index;
234 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
236 if (index != OldI2MI.size())
237 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
238 (idx == index ? offset : 0);
240 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
// Multiply (or divide, see InstrSlots::scale) every slot index in the
// function by the given factor, updating all derived data structures.
247 void LiveIntervals::scaleNumbering(int factor) {
249 // * scale MBB begin and end points
250 // * scale all ranges.
251 // * Update VNI structures.
252 // * Scale instruction numberings
254 // Scale the MBB indices.
256 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
257 MBB != MBBE; ++MBB) {
258 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
259 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
260 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
261 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
// Re-sort after rebuilding so lower_bound lookups stay valid.
263 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
265 // Scale the intervals.
266 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
267 LI->second->scaleNumbering(factor);
270 // Scale MachineInstrs.
271 Mi2IndexMap oldmi2iMap = mi2iMap_;
272 unsigned highestSlot = 0;
273 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
275 unsigned newSlot = InstrSlots::scale(MI->second, factor);
276 mi2iMap_[MI->first] = newSlot;
277 highestSlot = std::max(highestSlot, newSlot);
// Rebuild the reverse (index -> instruction) map from the scaled entries.
281 i2miMap_.resize(highestSlot + 1);
282 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
284 i2miMap_[MI->second] = MI->first;
290 /// runOnMachineFunction - Compute live intervals for the whole function.
292 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache pointers to the target and analysis objects used throughout.
294 mri_ = &mf_->getRegInfo();
295 tm_ = &fn.getTarget();
296 tri_ = tm_->getRegisterInfo();
297 tii_ = tm_->getInstrInfo();
298 aa_ = &getAnalysis<AliasAnalysis>();
299 lv_ = &getAnalysis<LiveVariables>();
300 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record how many intervals were built, for -stats output.
305 numIntervals += getNumIntervals();
311 /// print - Implement the dump method.
312 void LiveIntervals::print(std::ostream &O, const Module* ) const {
// First dump every live interval, then the numbered machine instructions.
313 O << "********** INTERVALS **********\n";
314 for (const_iterator I = begin(), E = end(); I != E; ++I) {
315 I->second->print(O, tri_);
319 O << "********** MACHINEINSTRS **********\n";
320 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
321 mbbi != mbbe; ++mbbi) {
322 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
323 for (MachineBasicBlock::iterator mii = mbbi->begin(),
324 mie = mbbi->end(); mii != mie; ++mii) {
// Print each instruction prefixed with its slot index.
325 O << getInstructionIndex(mii) << '\t' << *mii;
330 /// conflictsWithPhysRegDef - Returns true if the specified register
331 /// is defined during the duration of the specified interval.
332 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
333 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by the interval's ranges.
334 for (LiveInterval::Ranges::const_iterator
335 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
336 for (unsigned index = getBaseIndex(I->start),
337 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
338 index += InstrSlots::NUM) {
339 // skip deleted instructions
340 while (index != end && !getInstructionFromIndex(index))
341 index += InstrSlots::NUM;
342 if (index == end) break;
344 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from the interval's own register are not conflicts.
345 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
346 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
347 if (SrcReg == li.reg || DstReg == li.reg)
349 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
350 MachineOperand& mop = MI->getOperand(i);
353 unsigned PhysReg = mop.getReg();
354 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual operands through the VirtRegMap to their physreg.
356 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
357 if (!vrm.hasPhys(PhysReg))
359 PhysReg = vrm.getPhys(PhysReg);
// Conflict if the (mapped) physreg overlaps the queried register.
361 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
370 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
371 /// it can check use as well.
372 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
373 unsigned Reg, bool CheckUse,
374 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Walk every instruction slot covered by the interval's ranges.
375 for (LiveInterval::Ranges::const_iterator
376 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
377 for (unsigned index = getBaseIndex(I->start),
378 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
379 index += InstrSlots::NUM) {
380 // Skip deleted instructions.
381 MachineInstr *MI = 0;
382 while (index != end) {
383 MI = getInstructionFromIndex(index);
386 index += InstrSlots::NUM;
388 if (index == end) break;
// Copies already joined by the coalescer don't count as references.
390 if (JoinedCopies.count(MI))
392 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
393 MachineOperand& MO = MI->getOperand(i);
// Uses only matter when the caller asked for them (CheckUse).
396 if (MO.isUse() && !CheckUse)
398 unsigned PhysReg = MO.getReg();
399 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
401 if (tri_->isSubRegister(Reg, PhysReg))
// Print a register either by its target name (physical) or as %reg<N>
// (virtual) to cerr, for debug output.
411 void LiveIntervals::printRegName(unsigned reg) const {
412 if (TargetRegisterInfo::isPhysicalRegister(reg))
413 cerr << tri_->getName(reg);
415 cerr << "%reg" << reg;
// Extend the given interval to account for a definition of a virtual
// register by instruction `mi` (operand MO at slot MIIdx). Handles the
// first def of a vreg as well as redefs arising from two-address
// rewriting and PHI elimination.
418 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
419 MachineBasicBlock::iterator mi,
420 unsigned MIIdx, MachineOperand& MO,
422 LiveInterval &interval) {
423 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
424 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
426 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
427 DOUT << "is a implicit_def\n";
431 // Virtual registers may be defined multiple times (due to phi
432 // elimination and 2-addr elimination). Much of what we do only has to be
433 // done once for the vreg. We use an empty interval to detect the first
434 // time we see a vreg.
435 if (interval.empty()) {
436 // Get the Idx of the defining instructions.
437 unsigned defIndex = getDefIndex(MIIdx);
438 // Earlyclobbers move back one.
439 if (MO.isEarlyClobber())
440 defIndex = getUseIndex(MIIdx);
// Record the copy instruction (if any) that defines this value, so
// coalescing can identify the copy source later.
442 MachineInstr *CopyMI = NULL;
443 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
444 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
445 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
446 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
447 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
449 // Earlyclobbers move back one.
450 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
452 assert(ValNo->id == 0 && "First value in interval is not 0?");
454 // Loop over all of the blocks that the vreg is defined in. There are
455 // two cases we have to handle here. The most common case is a vreg
456 // whose lifetime is contained within a basic block. In this case there
457 // will be a single kill, in MBB, which comes after the definition.
458 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
459 // FIXME: what about dead vars?
461 if (vi.Kills[0] != mi)
462 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
464 killIdx = defIndex+1;
466 // If the kill happens after the definition, we have an intra-block
468 if (killIdx > defIndex) {
469 assert(vi.AliveBlocks.empty() &&
470 "Shouldn't be alive across any blocks!");
471 LiveRange LR(defIndex, killIdx, ValNo);
472 interval.addRange(LR);
473 DOUT << " +" << LR << "\n";
474 interval.addKill(ValNo, killIdx);
479 // The other case we handle is when a virtual register lives to the end
480 // of the defining block, potentially live across some blocks, then is
481 // live into some number of blocks, but gets killed. Start by adding a
482 // range that goes from this definition to the end of the defining block.
483 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
484 DOUT << " +" << NewLR;
485 interval.addRange(NewLR);
487 // Iterate over all of the blocks that the variable is completely
488 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
490 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
491 E = vi.AliveBlocks.end(); I != E; ++I) {
492 LiveRange LR(getMBBStartIdx(*I),
493 getMBBEndIdx(*I)+1, // MBB ends at -1.
495 interval.addRange(LR);
499 // Finally, this virtual register is live from the start of any killing
500 // block to the 'use' slot of the killing instruction.
501 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
502 MachineInstr *Kill = vi.Kills[i];
503 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
504 LiveRange LR(getMBBStartIdx(Kill->getParent()),
506 interval.addRange(LR);
507 interval.addKill(ValNo, killIdx);
512 // If this is the second time we see a virtual register definition, it
513 // must be due to phi elimination or two addr elimination. If this is
514 // the result of two address elimination, then the vreg is one of the
515 // def-and-use register operand.
516 if (mi->isRegTiedToUseOperand(MOIdx)) {
517 // If this is a two-address definition, then we have already processed
518 // the live range. The only problem is that we didn't realize there
519 // are actually two values in the live interval. Because of this we
520 // need to take the LiveRegion that defines this register and split it
522 assert(interval.containsOneValue());
523 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
524 unsigned RedefIndex = getDefIndex(MIIdx);
525 if (MO.isEarlyClobber())
526 RedefIndex = getUseIndex(MIIdx);
528 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
529 VNInfo *OldValNo = OldLR->valno;
531 // Delete the initial value, which should be short and continuous,
532 // because the 2-addr copy must be in the same MBB as the redef.
533 interval.removeRange(DefIndex, RedefIndex);
535 // Two-address vregs should always only be redefined once. This means
536 // that at this point, there should be exactly one value number in it.
537 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
539 // The new value number (#1) is defined by the instruction we claimed
541 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
542 false, // update at *
544 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
546 // Value#0 is now defined by the 2-addr instruction.
547 OldValNo->def = RedefIndex;
// Remember an early-clobber redef so interference checks can treat
// the overlap at the redef slot as legal.
549 if (MO.isEarlyClobber())
550 OldValNo->setHasRedefByEC(true);
552 // Add the new live interval which replaces the range for the input copy.
553 LiveRange LR(DefIndex, RedefIndex, ValNo);
554 DOUT << " replace range with " << LR;
555 interval.addRange(LR);
556 interval.addKill(ValNo, RedefIndex);
558 // If this redefinition is dead, we need to add a dummy unit live
559 // range covering the def slot.
561 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
564 interval.print(DOUT, tri_);
567 // Otherwise, this must be because of phi elimination. If this is the
568 // first redefinition of the vreg that we have seen, go back and change
569 // the live range in the PHI block to be a different value number.
570 if (interval.containsOneValue()) {
571 assert(vi.Kills.size() == 1 &&
572 "PHI elimination vreg should have one kill, the PHI itself!");
574 // Remove the old range that we now know has an incorrect number.
575 VNInfo *VNI = interval.getValNumInfo(0);
576 MachineInstr *Killer = vi.Kills[0];
577 unsigned Start = getMBBStartIdx(Killer->getParent());
578 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
579 DOUT << " Removing [" << Start << "," << End << "] from: ";
580 interval.print(DOUT, tri_); DOUT << "\n";
581 interval.removeRange(Start, End);
582 VNI->setHasPHIKill(true);
583 DOUT << " RESULT: "; interval.print(DOUT, tri_);
585 // Replace the interval with one of a NEW value number. Note that this
586 // value number isn't actually defined by an instruction, weird huh? :)
587 LiveRange LR(Start, End,
588 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
589 LR.valno->setIsPHIDef(true);
590 DOUT << " replace range with " << LR;
591 interval.addRange(LR);
592 interval.addKill(LR.valno, End);
593 DOUT << " RESULT: "; interval.print(DOUT, tri_);
596 // In the case of PHI elimination, each variable definition is only
597 // live until the end of the block. We've already taken care of the
598 // rest of the live range.
599 unsigned defIndex = getDefIndex(MIIdx);
600 if (MO.isEarlyClobber())
601 defIndex = getUseIndex(MIIdx);
604 MachineInstr *CopyMI = NULL;
605 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
606 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
607 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
608 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
609 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
611 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// The def is live out of this block; a PHI in a successor kills it.
613 unsigned killIndex = getMBBEndIdx(mbb) + 1;
614 LiveRange LR(defIndex, killIndex, ValNo);
615 interval.addRange(LR);
616 interval.addKill(ValNo, killIndex);
617 ValNo->setHasPHIKill(true);
// Extend the interval of a physical register for a def by `mi`. Physreg
// intervals never cross block boundaries, so the scan is local to MBB.
625 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
626 MachineBasicBlock::iterator mi,
629 LiveInterval &interval,
630 MachineInstr *CopyMI) {
631 // A physical register cannot be live across basic block, so its
632 // lifetime must end somewhere in its defining basic block.
633 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
635 unsigned baseIndex = MIIdx;
636 unsigned start = getDefIndex(baseIndex);
637 // Earlyclobbers move back one.
638 if (MO.isEarlyClobber())
639 start = getUseIndex(MIIdx);
640 unsigned end = start;
642 // If it is not used after definition, it is considered dead at
643 // the instruction defining it. Hence its interval is:
644 // [defSlot(def), defSlot(def)+1)
651 // If it is not dead on definition, it must be killed by a
652 // subsequent instruction. Hence its interval is:
653 // [defSlot(def), useSlot(kill)+1)
654 baseIndex += InstrSlots::NUM;
// Scan forward through the block for the kill or a redefinition.
655 while (++mi != MBB->end()) {
656 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
657 getInstructionFromIndex(baseIndex) == 0)
658 baseIndex += InstrSlots::NUM;
659 if (mi->killsRegister(interval.reg, tri_)) {
661 end = getUseIndex(baseIndex) + 1;
664 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
666 if (mi->isRegTiedToUseOperand(DefIdx)) {
667 // Two-address instruction.
668 end = getDefIndex(baseIndex);
669 if (mi->getOperand(DefIdx).isEarlyClobber())
670 end = getUseIndex(baseIndex);
672 // Another instruction redefines the register before it is ever read.
673 // Then the register is essentially dead at the instruction that defines
674 // it. Hence its interval is:
675 // [defSlot(def), defSlot(def)+1)
683 baseIndex += InstrSlots::NUM;
686 // The only case we should have a dead physreg here without a killing or
687 // instruction where we know it's dead is if it is live-in to the function
688 // and never used. Another possible case is the implicit use of the
689 // physical register has been deleted by two-address pass.
693 assert(start < end && "did not find end of interval?");
695 // Already exists? Extend old live interval.
696 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
697 bool Extend = OldLR != interval.end();
698 VNInfo *ValNo = Extend
699 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
700 if (MO.isEarlyClobber() && Extend)
701 ValNo->setHasRedefByEC(true);
702 LiveRange LR(start, end, ValNo);
703 interval.addRange(LR);
704 interval.addKill(LR.valno, end);
705 DOUT << " +" << LR << '\n';
// Dispatch a register def to the virtual- or physical-register handler,
// and propagate physreg defs to all sub-registers.
708 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
709 MachineBasicBlock::iterator MI,
713 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
714 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
715 getOrCreateInterval(MO.getReg()));
716 else if (allocatableRegs_[MO.getReg()]) {
// Identify copy-like instructions so the value number can record its
// defining copy (used later by coalescing).
717 MachineInstr *CopyMI = NULL;
718 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
719 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
720 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
721 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
722 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
724 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
725 getOrCreateInterval(MO.getReg()), CopyMI);
726 // Def of a register also defines its sub-registers.
727 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
728 // If MI also modifies the sub-register explicitly, avoid processing it
729 // more than once. Do not pass in TRI here so it checks for exact match.
730 if (!MI->modifiesRegister(*AS))
731 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
732 getOrCreateInterval(*AS), 0);
// Build the live range for a register that is live-in to MBB: it starts
// at the block entry (MIIdx) and extends to its first kill or redef.
736 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
738 LiveInterval &interval, bool isAlias) {
739 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
741 // Look for kills, if it reaches a def before it's killed, then it shouldn't
742 // be considered a livein.
743 MachineBasicBlock::iterator mi = MBB->begin();
744 unsigned baseIndex = MIIdx;
745 unsigned start = baseIndex;
// Skip over empty index slots at the top of the block.
746 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
747 getInstructionFromIndex(baseIndex) == 0)
748 baseIndex += InstrSlots::NUM;
749 unsigned end = baseIndex;
750 bool SeenDefUse = false;
752 while (mi != MBB->end()) {
753 if (mi->killsRegister(interval.reg, tri_)) {
// Killed: the live-in range ends just past the killing use.
755 end = getUseIndex(baseIndex) + 1;
758 } else if (mi->modifiesRegister(interval.reg, tri_)) {
759 // Another instruction redefines the register before it is ever read.
760 // Then the register is essentially dead at the instruction that defines
761 // it. Hence its interval is:
762 // [defSlot(def), defSlot(def)+1)
764 end = getDefIndex(start) + 1;
769 baseIndex += InstrSlots::NUM;
771 if (mi != MBB->end()) {
772 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
773 getInstructionFromIndex(baseIndex) == 0)
774 baseIndex += InstrSlots::NUM;
778 // Live-in register might not be used at all.
782 end = getDefIndex(MIIdx) + 1;
784 DOUT << " live through";
// The live-in value has no defining instruction in this function's body;
// mark its value number as a PHI-style def at the block entry.
790 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
791 vni->setIsPHIDef(true);
792 LiveRange LR(start, end, vni);
794 interval.addRange(LR);
795 interval.addKill(LR.valno, end);
796 DOUT << " +" << LR << '\n';
799 /// computeIntervals - computes the live intervals for virtual
800 /// registers. for some ordering of the machine instructions [1,N] a
801 /// live interval is an interval [i, j) where 1 <= i <= j < N for
802 /// which a variable is live
803 void LiveIntervals::computeIntervals() {
805 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
806 << "********** Function: "
807 << ((Value*)mf_->getFunction())->getName() << '\n';
809 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
811 MachineBasicBlock *MBB = MBBI;
812 // Track the index of the current machine instr.
813 unsigned MIIndex = getMBBStartIdx(MBB);
814 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
816 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
818 // Create intervals for live-ins to this BB first.
819 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
820 LE = MBB->livein_end(); LI != LE; ++LI) {
821 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
822 // Multiple live-ins can alias the same register.
823 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
824 if (!hasInterval(*AS))
825 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
829 // Skip over empty initial indices.
830 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
831 getInstructionFromIndex(MIIndex) == 0)
832 MIIndex += InstrSlots::NUM;
834 for (; MI != miEnd; ++MI) {
835 DOUT << MIIndex << "\t" << *MI;
// Process operands last-to-first so defs are seen in a stable order.
838 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
839 MachineOperand &MO = MI->getOperand(i);
840 // handle register defs - build intervals
841 if (MO.isReg() && MO.getReg() && MO.isDef()) {
842 handleRegisterDef(MBB, MI, MIIndex, MO, i);
846 // Skip over the empty slots after each instruction.
847 unsigned Slots = MI->getDesc().getNumDefs();
850 MIIndex += InstrSlots::NUM * Slots;
852 // Skip over empty indices.
853 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
854 getInstructionFromIndex(MIIndex) == 0)
855 MIIndex += InstrSlots::NUM;
// Collect into MBBs the basic blocks whose start index falls inside
// [Start, End), using binary search on the sorted Idx2MBBMap.
860 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
861 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
862 std::vector<IdxMBBPair>::const_iterator I =
863 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
866 while (I != Idx2MBBMap.end()) {
869 MBBs.push_back(I->second);
// Like findLiveInMBBs, but also walks successor edges of the blocks found
// in [Start, End) to collect reachable blocks.
876 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
877 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
878 std::vector<IdxMBBPair>::const_iterator I =
879 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
882 while (I != Idx2MBBMap.end()) {
885 MachineBasicBlock *MBB = I->second;
886 if (getMBBEndIdx(MBB) > End)
888 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
889 SE = MBB->succ_end(); SI != SE; ++SI)
// Allocate a fresh LiveInterval for `reg`. Physical registers get an
// effectively-infinite spill weight so they are never chosen for spilling.
897 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
898 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
899 return new LiveInterval(reg, Weight);
902 /// dupInterval - Duplicate a live interval. The caller is responsible for
903 /// managing the allocated memory.
904 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
// Deep-copy ranges and value numbers into a newly created interval.
905 LiveInterval *NewLI = createInterval(li->reg);
906 NewLI->Copy(*li, mri_, getVNInfoAllocator());
910 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
911 /// copy field and returns the source register that defines it.
912 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
916 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
917 // If it's extracting out of a physical register, return the sub-register.
918 unsigned Reg = VNI->copy->getOperand(1).getReg();
919 if (TargetRegisterInfo::isPhysicalRegister(Reg))
920 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG carry their source in operand 2.
922 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
923 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
924 return VNI->copy->getOperand(2).getReg();
// Otherwise ask the target whether this is a plain move and, if so,
// which register it copies from.
926 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
927 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
929 assert(0 && "Unrecognized copy instruction!");
933 //===----------------------------------------------------------------------===//
934 // Register allocator hooks.
937 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
938 /// allow one) virtual register operand, then its uses are implicitly using
939 /// the register. Returns the virtual register.
940 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
941 MachineInstr *MI) const {
// Scan MI's operands for a register use other than li.reg itself.
943 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
944 MachineOperand &MO = MI->getOperand(i);
945 if (!MO.isReg() || !MO.isUse())
947 unsigned Reg = MO.getReg();
948 if (Reg == 0 || Reg == li.reg)
950 // FIXME: For now, only remat MI with at most one register operand.
952 "Can't rematerialize instruction with multiple register operand!");
961 /// isValNoAvailableAt - Return true if the val# of the specified interval
962 /// which reaches the given instruction also reaches the specified use index.
963 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
964 unsigned UseIdx) const {
// Find the value number live at MI, then check that the live range
// covering UseIdx (if any) carries the same value number.
965 unsigned Index = getInstructionIndex(MI);
966 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
967 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
968 return UI != li.end() && UI->valno == ValNo;
// isReMaterializable (full form) - Decide whether the def MI of val# ValNo
// can be rematerialized instead of spilled/reloaded. NOTE(review): this dump
// elides many lines (e.g. 975->980, 985->986, 989->992), so `return` bodies,
// some declarations (FrameIdx, the trailing bool parameter on line ~976) and
// closing braces are not visible; comments below describe only what is shown.
971 /// isReMaterializable - Returns true if the definition MI of the specified
972 /// val# of the specified interval is re-materializable.
973 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
974 const VNInfo *ValNo, MachineInstr *MI,
975 SmallVectorImpl<LiveInterval*> &SpillIs,
// IMPLICIT_DEF is trivially rematerializable (result branch elided).
980 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// A load from an immutable stack slot (e.g. incoming argument) can always
// be re-executed safely. FrameIdx's declaration is on an elided line.
984 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
985 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
986 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
987 // this but remember this is not safe to fold into a two-address
989 // This is a load from fixed stack slot. It can be rematerialized.
992 // If the target-specific rules don't identify an instruction as
993 // being trivially rematerializable, use some target-independent
995 if (!MI->getDesc().isRematerializable() ||
996 !tii_->isTriviallyReMaterializable(MI)) {
// Without -aggressive-remat, give up here (return on elided line).
997 if (!EnableAggressiveRemat)
1000 // If the instruction accesses memory but the memoperands have been lost,
1001 // we can't analyze it.
1002 const TargetInstrDesc &TID = MI->getDesc();
1003 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1006 // Avoid instructions obviously unsafe for remat.
1007 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1010 // If the instruction accesses memory and the memory could be non-constant,
1011 // assume the instruction is not rematerializable.
1012 for (std::list<MachineMemOperand>::const_iterator
1013 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1014 const MachineMemOperand &MMO = *I;
// Volatile accesses and stores can never be safely re-executed.
1015 if (MMO.isVolatile() || MMO.isStore())
1017 const Value *V = MMO.getValue();
// Pseudo source values (stack/constant-pool/GOT) are OK only if the
// target says that slot is constant; otherwise fall back to alias
// analysis to prove the pointee is constant memory.
1020 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1021 if (!PSV->isConstant(mf_->getFrameInfo()))
1023 } else if (!aa_->pointsToConstantMemory(V))
1027 // If any of the registers accessed are non-constant, conservatively assume
1028 // the instruction is not rematerializable.
1029 unsigned ImpUse = 0;
1030 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1031 const MachineOperand &MO = MI->getOperand(i);
1033 unsigned Reg = MO.getReg();
// Physical registers are not tracked here (handling elided).
1036 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1039 // Only allow one def, and that in the first operand.
1040 if (MO.isDef() != (i == 0))
1043 // Only allow constant-valued registers.
1044 bool IsLiveIn = mri_->isLiveIn(Reg);
1045 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1046 E = mri_->def_end();
1048 // For the def, it should be the only def of that register.
1049 if (MO.isDef() && (next(I) != E || IsLiveIn))
1053 // Only allow one use other register use, as that's all the
1054 // remat mechanisms support currently.
1055 if (Reg != li.reg) {
// First non-li.reg use is remembered (assignment elided); a second,
// different one disqualifies the instruction.
1058 else if (Reg != ImpUse)
1061 // For the use, there should be only one associated def.
1062 if (I != E && (next(I) != E || IsLiveIn))
// Non-aggressive path: if MI has an implicit virtual-register use, its
// value must reach every use of li.reg that this val# covers.
1069 unsigned ImpUse = getReMatImplicitUse(li, MI);
1071 const LiveInterval &ImpLi = getInterval(ImpUse);
1072 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1073 re = mri_->use_end(); ri != re; ++ri) {
1074 MachineInstr *UseMI = &*ri;
1075 unsigned UseIdx = getInstructionIndex(UseMI);
// Skip uses reached by a different value number.
1076 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1078 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1082 // If a register operand of the re-materialized instruction is going to
1083 // be spilled next, then it's not legal to re-materialize this instruction.
1084 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1085 if (ImpUse == SpillIs[i]->reg)
// Convenience overload: forwards to the full form with throwaway outputs.
// NOTE(review): Dummy2's declaration sits on an elided line (original 1096);
// presumably a bool for the isLoad out-parameter — confirm in full source.
1091 /// isReMaterializable - Returns true if the definition MI of the specified
1092 /// val# of the specified interval is re-materializable.
1093 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1094 const VNInfo *ValNo, MachineInstr *MI) {
1095 SmallVector<LiveInterval*, 4> Dummy1;
1097 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
// Whole-interval form: every non-dead val# must have an accurately-recorded
// def that is individually rematerializable. isLoad (out-param, declaration
// partially elided) accumulates whether any remat def is a load.
1100 /// isReMaterializable - Returns true if every definition of MI of every
1101 /// val# of the specified interval is re-materializable.
1102 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1103 SmallVectorImpl<LiveInterval*> &SpillIs,
1106 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1108 const VNInfo *VNI = *i;
1109 if (VNI->isUnused())
1110 continue; // Dead val#.
1111 // Is the def for the val# rematerializable?
// Without an accurate def slot we cannot find the defining instruction
// (failure branch elided).
1112 if (!VNI->isDefAccurate())
1114 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1115 bool DefIsLoad = false;
// Null-check of ReMatDefMI is on an elided line (original 1116).
1117 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1119 isLoad |= DefIsLoad;
// FilterFoldedOps - From the candidate operand indexes in Ops, build the
// subset FoldOps that may legally be folded, and accumulate the mod/ref
// summary into MRInfo (VirtRegMap::isMod / isRef / isModRef). MRInfo's
// parameter declaration (unsigned&, original line 1129) is elided here.
1124 /// FilterFoldedOps - Filter out two-address use operands. Return
1125 /// true if it finds any issue with the operands that ought to prevent
1127 static bool FilterFoldedOps(MachineInstr *MI,
1128 SmallVector<unsigned, 2> &Ops,
1130 SmallVector<unsigned, 2> &FoldOps) {
1132 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1133 unsigned OpIdx = Ops[i];
1134 MachineOperand &MO = MI->getOperand(OpIdx);
1135 // FIXME: fold subreg use.
// Defs mark the register as modified (the isDef test is elided).
1139 MRInfo |= (unsigned)VirtRegMap::isMod;
1141 // Filter out two-address use operand(s).
// A use tied to a def means the instruction both reads and writes the
// register; record ModRef and (per the function's doc) skip folding it.
1142 if (MI->isRegTiedToDefOperand(OpIdx)) {
1143 MRInfo = VirtRegMap::isModRef;
1146 MRInfo |= (unsigned)VirtRegMap::isRef;
1148 FoldOps.push_back(OpIdx);
// tryFoldMemoryOperand - Attempt to fold a stack-slot access (isSS) or a
// rematerialized def (DefMI) into MI's operands listed in Ops. On success
// MI is replaced in-place by the folded instruction and the index maps /
// VirtRegMap bookkeeping are transferred. NOTE(review): several lines are
// elided (e.g. the InstrIdx parameter around original 1160, the failure
// returns, and the `if (fmi)` guard around 1186), so the visible flow is
// incomplete.
1154 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1155 /// slot / to reg or any rematerialized load into ith operand of specified
1156 /// MI. If it is successul, MI is updated with the newly created MI and
1158 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1159 VirtRegMap &vrm, MachineInstr *DefMI,
1161 SmallVector<unsigned, 2> &Ops,
1162 bool isSS, int Slot, unsigned Reg) {
1163 // If it is an implicit def instruction, just delete it.
1164 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
// Remove from both the interval analysis maps and the VirtRegMap.
1165 RemoveMachineInstrFromMaps(MI);
1166 vrm.RemoveMachineInstrFromMaps(MI);
1167 MI->eraseFromParent();
1172 // Filter the list of operand indexes that are to be folded. Abort if
1173 // any operand will prevent folding.
1174 unsigned MRInfo = 0;
1175 SmallVector<unsigned, 2> FoldOps;
1176 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1179 // The only time it's safe to fold into a two address instruction is when
1180 // it's folding reload and spill from / into a spill stack slot.
1181 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Dispatch to the stack-slot or remat-def flavour of the target folder.
1184 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1185 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1187 // Remember this instruction uses the spill slot.
1188 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1190 // Attempt to fold the memory reference into the instruction. If
1191 // we can do this, we don't need to insert spill code.
1192 MachineBasicBlock &MBB = *MI->getParent();
1193 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1194 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all spill/restore/emergency-spill points from MI to fmi.
1195 vrm.transferSpillPts(MI, fmi);
1196 vrm.transferRestorePts(MI, fmi);
1197 vrm.transferEmergencySpills(MI, fmi);
// Update the bidirectional instruction<->index maps to point at fmi.
1199 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1200 mi2iMap_[fmi] = InstrIdx;
// Replace MI with fmi in the basic block.
1201 MI = MBB.insert(MBB.erase(MI), fmi);
// canFoldMemoryOperand - Query-only counterpart of tryFoldMemoryOperand:
// no instruction is created or replaced. The ReMat parameter's declaration
// (bool, original line ~1212) is on an elided line.
1208 /// canFoldMemoryOperand - Returns true if the specified load / store
1209 /// folding is possible.
1210 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1211 SmallVector<unsigned, 2> &Ops,
1213 // Filter the list of operand indexes that are to be folded. Abort if
1214 // any operand will prevent folding.
1215 unsigned MRInfo = 0;
1216 SmallVector<unsigned, 2> FoldOps;
1217 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1220 // It's only legal to remat for a use, not a def.
1221 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Finally ask the target whether these operands can be folded.
1224 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of li lies within a single
// machine basic block. Idx2MBBMap is a sorted vector of (start index, MBB)
// pairs, so lower_bound finds the block containing a range start. The final
// `return` and some early-exit branch bodies are on elided lines.
1227 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1228 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1229 for (LiveInterval::Ranges::const_iterator
1230 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1231 std::vector<IdxMBBPair>::const_iterator II =
1232 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1233 if (II == Idx2MBBMap.end())
// A range extending past the found block's boundary spans blocks.
1235 if (I->end > II->first) // crossing a MBB.
1237 MBBs.insert(II->second);
// Seeing a second distinct block means the interval is not local.
1238 if (MBBs.size() > 1)
// rewriteImplicitOps - After remat, any other rematerialized instruction
// that still uses li.reg must have that use renamed to NewVReg. The vrm
// parameter declaration (original line ~1248) and the null-check of UseMO
// (line 1263) are on elided lines.
1244 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1245 /// interval on to-be re-materialized operands of MI) with new register.
1246 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1247 MachineInstr *MI, unsigned NewVReg,
1249 // There is an implicit use. That means one of the other operand is
1250 // being remat'ed and the remat'ed instruction has li.reg as an
1251 // use operand. Make sure we rewrite that as well.
1252 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1253 MachineOperand &MO = MI->getOperand(i);
1256 unsigned Reg = MO.getReg();
// Only virtual registers can have remat records in the VirtRegMap.
1257 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1259 if (!vrm.isReMaterialized(Reg))
// Patch the remat'ed def's use of li.reg to the replacement vreg.
1261 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1262 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1264 UseMO->setReg(NewVReg);
// rewriteInstructionForSpills - Rewrite one instruction's references to the
// spilled interval li: delete it if it is a fully-rematerialized def, try
// to fold the reload/spill into it, otherwise rename its operands to a new
// spill vreg and extend/create the spill interval's live ranges.
// NOTE(review): this dump elides a large number of lines throughout the
// body (guard bodies, the RestartInstruction label, several closing braces,
// and range-insertion calls), so the commentary below is limited to the
// visible statements; the restructuring of this function should only be
// attempted against the full source.
1268 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1269 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1270 bool LiveIntervals::
1271 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1272 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1273 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1274 unsigned Slot, int LdSlot,
1275 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1277 const TargetRegisterClass* rc,
1278 SmallVector<int, 4> &ReMatIds,
1279 const MachineLoopInfo *loopInfo,
1280 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1281 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1282 std::vector<LiveInterval*> &NewLIs) {
1283 bool CanFold = false;
// Operand scan; getNumOperands() is re-read each iteration because folding
// can replace MI (see the restart logic below).
1285 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1286 MachineOperand& mop = MI->getOperand(i);
1289 unsigned Reg = mop.getReg();
1290 unsigned RegI = Reg;
1291 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1296 bool TryFold = !DefIsReMat;
1297 bool FoldSS = true; // Default behavior unless it's a remat.
1298 int FoldSlot = Slot;
1300 // If this is the rematerializable definition MI itself and
1301 // all of its uses are rematerialized, simply delete it.
1302 if (MI == ReMatOrigDefMI && CanDelete) {
1303 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1305 RemoveMachineInstrFromMaps(MI);
1306 vrm.RemoveMachineInstrFromMaps(MI);
1307 MI->eraseFromParent();
1311 // If def for this use can't be rematerialized, then try folding.
1312 // If def is rematerializable and it's a load, also try folding.
1313 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1315 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1321 // Scan all of the operands of this instruction rewriting operands
1322 // to use NewVReg instead of li.reg as appropriate. We do this for
1325 // 1. If the instr reads the same spilled vreg multiple times, we
1326 // want to reuse the NewVReg.
1327 // 2. If the instr is a two-addr instruction, we are required to
1328 // keep the src/dst regs pinned.
1330 // Keep track of whether we replace a use and/or def so that we can
1331 // create the spill interval with the appropriate range.
1333 HasUse = mop.isUse();
1334 HasDef = mop.isDef();
1335 SmallVector<unsigned, 2> Ops;
// Collect every later operand that references the same register so they
// can all be renamed (and considered for folding) together.
1337 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1338 const MachineOperand &MOj = MI->getOperand(j);
1341 unsigned RegJ = MOj.getReg();
1342 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1346 HasUse |= MOj.isUse();
1347 HasDef |= MOj.isDef();
// A use at a point where li is not live must come from an implicit def;
// such uses are skipped rather than spilled (see comment below).
1351 if (HasUse && !li.liveAt(getUseIndex(index)))
1352 // Must be defined by an implicit def. It should not be spilled. Note,
1353 // this is for correctness reason. e.g.
1354 // 8 %reg1024<def> = IMPLICIT_DEF
1355 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1356 // The live range [12, 14) are not part of the r1024 live interval since
1357 // it's defined by an implicit def. It will not conflicts with live
1358 // interval of r1025. Now suppose both registers are spilled, you can
1359 // easily see a situation where both registers are reloaded before
1360 // the INSERT_SUBREG and both target registers that would overlap.
1363 // Create a new virtual register for the spill interval.
1364 // Create the new register now so we can map the fold instruction
1365 // to the new register so when it is unfolded we get the correct
1367 bool CreatedNewVReg = false;
1369 NewVReg = mri_->createVirtualRegister(rc);
1371 CreatedNewVReg = true;
1377 // Do not fold load / store here if we are splitting. We'll find an
1378 // optimal point to insert a load / store later.
1380 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1381 Ops, FoldSS, FoldSlot, NewVReg)) {
1382 // Folding the load/store can completely change the instruction in
1383 // unpredictable ways, rescan it from the beginning.
1386 // We need to give the new vreg the same stack slot as the
1387 // spilled interval.
1388 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed MI from the index maps entirely, restart the
// operand scan on the (possibly new) instruction.
1394 if (isNotInMIMap(MI))
1396 goto RestartInstruction;
1399 // We'll try to fold it later if it's profitable.
1400 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rename the primary operand, then every collected duplicate operand.
1404 mop.setReg(NewVReg);
1405 if (mop.isImplicit())
1406 rewriteImplicitOps(li, MI, NewVReg, vrm);
1408 // Reuse NewVReg for other reads.
1409 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1410 MachineOperand &mopj = MI->getOperand(Ops[j]);
1411 mopj.setReg(NewVReg);
1412 if (mopj.isImplicit())
1413 rewriteImplicitOps(li, MI, NewVReg, vrm);
1416 if (CreatedNewVReg) {
// Record remat info per val#: the first vreg for this val# allocates
// a remat id, later ones share it.
1418 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1419 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1420 // Each valnum may have its own remat id.
1421 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1423 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1425 if (!CanDelete || (HasUse && HasDef)) {
1426 // If this is a two-addr instruction then its use operands are
1427 // rematerializable but its def is not. It should be assigned a
1429 vrm.assignVirt2StackSlot(NewVReg, Slot);
1432 vrm.assignVirt2StackSlot(NewVReg, Slot);
1434 } else if (HasUse && HasDef &&
1435 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1436 // If this interval hasn't been assigned a stack slot (because earlier
1437 // def is a deleted remat def), do it now.
1438 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1439 vrm.assignVirt2StackSlot(NewVReg, Slot);
1442 // Re-matting an instruction with virtual register use. Add the
1443 // register as an implicit use on the use MI.
1444 if (DefIsReMat && ImpUse)
1445 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1447 // Create a new register interval for this spill / remat.
1448 LiveInterval &nI = getOrCreateInterval(NewVReg);
1449 if (CreatedNewVReg) {
1450 NewLIs.push_back(&nI);
1451 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1453 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the new interval's ranges: a fresh [load, use+1) range for a new
// vreg, or an extension of the last range for a reused one; defs add a
// [def, store) range (the addRange calls are on elided lines).
1457 if (CreatedNewVReg) {
1458 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1459 nI.getNextValue(0, 0, false, VNInfoAllocator));
1463 // Extend the split live interval to this def / use.
1464 unsigned End = getUseIndex(index)+1;
1465 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1466 nI.getValNumInfo(nI.getNumValNums()-1));
1472 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1473 nI.getNextValue(0, 0, false, VNInfoAllocator));
1478 DOUT << "\t\t\t\tAdded new interval: ";
1479 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - True if the given val# has a kill inside MBB at an
// index strictly after Idx. VNI's parameter declaration (original line 1485)
// and the return statements are on elided lines.
1484 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1486 MachineBasicBlock *MBB, unsigned Idx) const {
// Kills past the block's end index belong to other blocks.
1487 unsigned End = getMBBEndIdx(MBB);
1488 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1489 unsigned KillIdx = VNI->kills[j];
1490 if (KillIdx > Idx && KillIdx < End)
// RewriteInfo records one (instruction index, instruction, use?, def?)
// occurrence of the spilled register; RewriteInfoCompare orders records by
// instruction index for the std::sort in rewriteInstructionsForSpills.
// Member declarations (Index/MI/HasUse/HasDef) and closing braces fall on
// elided lines of this dump.
1496 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1497 /// during spilling.
1499 struct RewriteInfo {
1504 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1505 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1508 struct RewriteInfoCompare {
1509 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1510 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Drive rewriteInstructionForSpills over every
// instruction touching the live range I of li, then record per-MBB spill and
// restore points (SpillIdxes/SpillMBBs, RestoreIdxes/RestoreMBBs) for the
// split/spill insertion pass. NOTE(review): many lines are elided in this
// dump (the vrm parameter, several loop-control statements, closing braces,
// and the weight-capping branches), so the visible control flow is partial;
// comments below annotate only what is shown.
1515 void LiveIntervals::
1516 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1517 LiveInterval::Ranges::const_iterator &I,
1518 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1519 unsigned Slot, int LdSlot,
1520 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1522 const TargetRegisterClass* rc,
1523 SmallVector<int, 4> &ReMatIds,
1524 const MachineLoopInfo *loopInfo,
1525 BitVector &SpillMBBs,
1526 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1527 BitVector &RestoreMBBs,
1528 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1529 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1530 std::vector<LiveInterval*> &NewLIs) {
1531 bool AllCanFold = true;
1532 unsigned NewVReg = 0;
// Round the range endpoints to whole instruction slots.
1533 unsigned start = getBaseIndex(I->start);
1534 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1536 // First collect all the def / use in this live range that will be rewritten.
1537 // Make sure they are sorted according to instruction index.
1538 std::vector<RewriteInfo> RewriteMIs;
1539 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1540 re = mri_->reg_end(); ri != re; ) {
1541 MachineInstr *MI = &*ri;
1542 MachineOperand &O = ri.getOperand();
1544 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1545 unsigned index = getInstructionIndex(MI);
// Only references inside [start, end) belong to this range.
1546 if (index < start || index >= end)
1548 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1549 // Must be defined by an implicit def. It should not be spilled. Note,
1550 // this is for correctness reason. e.g.
1551 // 8 %reg1024<def> = IMPLICIT_DEF
1552 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1553 // The live range [12, 14) are not part of the r1024 live interval since
1554 // it's defined by an implicit def. It will not conflicts with live
1555 // interval of r1025. Now suppose both registers are spilled, you can
1556 // easily see a situation where both registers are reloaded before
1557 // the INSERT_SUBREG and both target registers that would overlap.
1559 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1561 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat'ed def itself uses another vreg, remember it — its interval
// must be protected from spilling below.
1563 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1564 // Now rewrite the defs and uses.
1565 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1566 RewriteInfo &rwi = RewriteMIs[i];
1568 unsigned index = rwi.Index;
1569 bool MIHasUse = rwi.HasUse;
1570 bool MIHasDef = rwi.HasDef;
1571 MachineInstr *MI = rwi.MI;
1572 // If MI def and/or use the same register multiple times, then there
1573 // are multiple entries.
// Coalesce consecutive records for the same MI, counting its uses.
1574 unsigned NumUses = MIHasUse;
1575 while (i != e && RewriteMIs[i].MI == MI) {
1576 assert(RewriteMIs[i].Index == index);
1577 bool isUse = RewriteMIs[i].HasUse;
1578 if (isUse) ++NumUses;
1580 MIHasDef |= RewriteMIs[i].HasDef;
1583 MachineBasicBlock *MBB = MI->getParent();
1585 if (ImpUse && MI != ReMatDefMI) {
1586 // Re-matting an instruction with virtual register use. Update the
1587 // register interval's spill weight to HUGE_VALF to prevent it from
1589 LiveInterval &ImpLi = getInterval(ImpUse);
1590 ImpLi.weight = HUGE_VALF;
// When splitting, reuse this block's existing spill vreg if one was
// already created (looked up through MBBVRegsMap).
1593 unsigned MBBId = MBB->getNumber();
1594 unsigned ThisVReg = 0;
1596 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1597 if (NVI != MBBVRegsMap.end()) {
1598 ThisVReg = NVI->second;
1605 // It's better to start a new interval to avoid artifically
1606 // extend the new interval.
1607 if (MIHasDef && !MIHasUse) {
// A pure redefinition starts a fresh interval in this block.
1608 MBBVRegsMap.erase(MBB->getNumber());
1614 bool IsNew = ThisVReg == 0;
1616 // This ends the previous live interval. If all of its def / use
1617 // can be folded, give it a low spill weight.
1618 if (NewVReg && TrySplit && AllCanFold) {
1619 LiveInterval &nI = getOrCreateInterval(NewVReg);
1626 bool HasDef = false;
1627 bool HasUse = false;
1628 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1629 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1630 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1631 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1632 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing rewritten (e.g. the instruction was deleted): skip bookkeeping.
1633 if (!HasDef && !HasUse)
1636 AllCanFold &= CanFold;
1638 // Update weight of spill interval.
1639 LiveInterval &nI = getOrCreateInterval(NewVReg);
1641 // The spill weight is now infinity as it cannot be spilled again.
1642 nI.weight = HUGE_VALF;
1646 // Keep track of the last def and first use in each MBB.
1648 if (MI != ReMatOrigDefMI || !CanDelete) {
1649 bool HasKill = false;
1651 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1653 // If this is a two-address code, then this index starts a new VNInfo.
1654 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1656 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1658 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1659 SpillIdxes.find(MBBId);
// Record (or update) the last spill point for this vreg in this block.
1661 if (SII == SpillIdxes.end()) {
1662 std::vector<SRInfo> S;
1663 S.push_back(SRInfo(index, NewVReg, true));
1664 SpillIdxes.insert(std::make_pair(MBBId, S));
1665 } else if (SII->second.back().vreg != NewVReg) {
1666 SII->second.push_back(SRInfo(index, NewVReg, true));
1667 } else if ((int)index > SII->second.back().index) {
1668 // If there is an earlier def and this is a two-address
1669 // instruction, then it's not possible to fold the store (which
1670 // would also fold the load).
1671 SRInfo &Info = SII->second.back();
1673 Info.canFold = !HasUse;
1675 SpillMBBs.set(MBBId);
1676 } else if (SII != SpillIdxes.end() &&
1677 SII->second.back().vreg == NewVReg &&
1678 (int)index > SII->second.back().index) {
1679 // There is an earlier def that's not killed (must be two-address).
1680 // The spill is no longer needed.
1681 SII->second.pop_back();
1682 if (SII->second.empty()) {
1683 SpillIdxes.erase(MBBId);
1684 SpillMBBs.reset(MBBId);
// Restore-point bookkeeping for uses.
1691 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1692 SpillIdxes.find(MBBId);
1693 if (SII != SpillIdxes.end() &&
1694 SII->second.back().vreg == NewVReg &&
1695 (int)index > SII->second.back().index)
1696 // Use(s) following the last def, it's not safe to fold the spill.
1697 SII->second.back().canFold = false;
1698 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1699 RestoreIdxes.find(MBBId);
1700 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1701 // If we are splitting live intervals, only fold if it's the first
1702 // use and there isn't another use later in the MBB.
1703 RII->second.back().canFold = false;
1705 // Only need a reload if there isn't an earlier def / use.
1706 if (RII == RestoreIdxes.end()) {
1707 std::vector<SRInfo> Infos;
1708 Infos.push_back(SRInfo(index, NewVReg, true));
1709 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1711 RII->second.push_back(SRInfo(index, NewVReg, true));
1713 RestoreMBBs.set(MBBId);
1717 // Update spill weight.
1718 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1719 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1722 if (NewVReg && TrySplit && AllCanFold) {
1723 // If all of its def / use can be folded, give it a low spill weight.
1724 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - True if block Id contains a foldable restore record for
// vreg vr at exactly this index. The return statements fall on elided lines.
1729 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1730 BitVector &RestoreMBBs,
1731 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1732 if (!RestoreMBBs[Id])
1734 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1735 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1736 if (Restores[i].index == index &&
1737 Restores[i].vreg == vr &&
1738 Restores[i].canFold)
// eraseRestoreInfo - Invalidate (index = -1) restore records in block Id that
// match the given index, so no reload is issued after a successful fold.
1743 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1744 BitVector &RestoreMBBs,
1745 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1746 if (!RestoreMBBs[Id])
1748 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1749 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this tests `Restores[i].vreg` for truthiness rather than
// comparing it against the vr parameter (contrast alsoFoldARestore's
// `Restores[i].vreg == vr`). Verify against the full source whether
// `== vr` was intended before relying on this.
1750 if (Restores[i].index == index && Restores[i].vreg)
1751 Restores[i].index = -1;
// handleSpilledImpDefs - For a spilled interval, erase IMPLICIT_DEF defs of
// li.reg and give each remaining (implicit-def-fed) use its own fresh empty
// interval. The return type and several branch lines (the def/use test at
// original 1764-1765, the setReg at 1782) are elided in this dump.
1754 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1755 /// spilled and create empty intervals for their uses.
1757 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1758 const TargetRegisterClass* rc,
1759 std::vector<LiveInterval*> &NewLIs) {
1760 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1761 re = mri_->reg_end(); ri != re; ) {
1762 MachineOperand &O = ri.getOperand();
1763 MachineInstr *MI = &*ri;
// Defs reaching this point must be IMPLICIT_DEFs: every real def was
// already rewritten by the spill pass. Erase them from all maps.
1766 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1767 "Register def was not rewritten?");
1768 RemoveMachineInstrFromMaps(MI);
1769 vrm.RemoveMachineInstrFromMaps(MI);
1770 MI->eraseFromParent();
1772 // This must be an use of an implicit_def so it's not part of the live
1773 // interval. Create a new empty live interval for it.
1774 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1775 unsigned NewVReg = mri_->createVirtualRegister(rc);
1777 vrm.setIsImplicitlyDefined(NewVReg);
1778 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rename every operand of MI that still references li.reg (the setReg
// call is on an elided line).
1779 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1780 MachineOperand &MO = MI->getOperand(i);
1781 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpillsFast - The -fast-spill path: assign li one stack slot,
// then for each instruction touching li.reg either fold the slot access in
// or rewrite the operands to a fresh vreg with a tiny [load,use)/[def,store)
// interval of infinite weight. NOTE(review): several lines are elided (the
// vrm parameter, the addRange calls after each LiveRange, loop-advance
// statements, the final return), so the visible body is partial.
1788 std::vector<LiveInterval*> LiveIntervals::
1789 addIntervalsForSpillsFast(const LiveInterval &li,
1790 const MachineLoopInfo *loopInfo,
1792 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1794 std::vector<LiveInterval*> added;
1796 assert(li.weight != HUGE_VALF &&
1797 "attempt to spill already spilled interval!");
1799 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1803 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1805 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1806 while (RI != mri_->reg_end()) {
1807 MachineInstr* MI = &*RI;
// Gather every operand index of MI that references li.reg.
1809 SmallVector<unsigned, 2> Indices;
1810 bool HasUse = false;
1811 bool HasDef = false;
1813 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1814 MachineOperand& mop = MI->getOperand(i);
1815 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1817 HasUse |= MI->getOperand(i).isUse();
1818 HasDef |= MI->getOperand(i).isDef();
1820 Indices.push_back(i);
// Prefer folding the slot directly into MI; only on failure create a
// reload/spill vreg.
1823 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1824 Indices, true, slot, li.reg)) {
1825 unsigned NewVReg = mri_->createVirtualRegister(rc);
1827 vrm.assignVirt2StackSlot(NewVReg, slot);
1829 // create a new register for this spill
1830 LiveInterval &nI = getOrCreateInterval(NewVReg);
1832 // the spill weight is now infinity as it
1833 // cannot be spilled again
1834 nI.weight = HUGE_VALF;
1836 // Rewrite register operands to use the new vreg.
1837 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1838 E = Indices.end(); I != E; ++I) {
1839 MI->getOperand(*I).setReg(NewVReg);
// The new vreg's live segment ends here, so uses are kills.
1841 if (MI->getOperand(*I).isUse())
1842 MI->getOperand(*I).setIsKill(true);
1845 // Fill in the new live interval.
1846 unsigned index = getInstructionIndex(MI);
// Use side: live from the reload to just before the use...
1848 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1849 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1852 vrm.addRestorePoint(NewVReg, MI);
// ...def side: live from the def until the spill store.
1855 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1856 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1859 vrm.addSpillPoint(NewVReg, true, MI);
1862 added.push_back(&nI);
1864 DOUT << "\t\t\t\tadded new interval: ";
// Folding/rewriting invalidates RI; restart the reg_iterator walk.
1870 RI = mri_->reg_begin(li.reg);
1876 std::vector<LiveInterval*> LiveIntervals::
1877 addIntervalsForSpills(const LiveInterval &li,
1878 SmallVectorImpl<LiveInterval*> &SpillIs,
1879 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1881 if (EnableFastSpilling)
1882 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1884 assert(li.weight != HUGE_VALF &&
1885 "attempt to spill already spilled interval!");
1887 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1888 li.print(DOUT, tri_);
1891 // Each bit specify whether a spill is required in the MBB.
1892 BitVector SpillMBBs(mf_->getNumBlockIDs());
1893 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1894 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1895 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1896 DenseMap<unsigned,unsigned> MBBVRegsMap;
1897 std::vector<LiveInterval*> NewLIs;
1898 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1900 unsigned NumValNums = li.getNumValNums();
1901 SmallVector<MachineInstr*, 4> ReMatDefs;
1902 ReMatDefs.resize(NumValNums, NULL);
1903 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1904 ReMatOrigDefs.resize(NumValNums, NULL);
1905 SmallVector<int, 4> ReMatIds;
1906 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1907 BitVector ReMatDelete(NumValNums);
1908 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1910 // Spilling a split live interval. It cannot be split any further. Also,
1911 // it's also guaranteed to be a single val# / range interval.
1912 if (vrm.getPreSplitReg(li.reg)) {
1913 vrm.setIsSplitFromReg(li.reg, 0);
1914 // Unset the split kill marker on the last use.
1915 unsigned KillIdx = vrm.getKillPoint(li.reg);
1917 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1918 assert(KillMI && "Last use disappeared?");
1919 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1920 assert(KillOp != -1 && "Last use disappeared?");
1921 KillMI->getOperand(KillOp).setIsKill(false);
1923 vrm.removeKillPoint(li.reg);
1924 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1925 Slot = vrm.getStackSlot(li.reg);
1926 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1927 MachineInstr *ReMatDefMI = DefIsReMat ?
1928 vrm.getReMaterializedMI(li.reg) : NULL;
1930 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1931 bool isLoad = isLoadSS ||
1932 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1933 bool IsFirstRange = true;
1934 for (LiveInterval::Ranges::const_iterator
1935 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1936 // If this is a split live interval with multiple ranges, it means there
1937 // are two-address instructions that re-defined the value. Only the
1938 // first def can be rematerialized!
1940 // Note ReMatOrigDefMI has already been deleted.
1941 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1942 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1943 false, vrm, rc, ReMatIds, loopInfo,
1944 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1945 MBBVRegsMap, NewLIs);
1947 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1948 Slot, 0, false, false, false,
1949 false, vrm, rc, ReMatIds, loopInfo,
1950 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1951 MBBVRegsMap, NewLIs);
1953 IsFirstRange = false;
// NOTE(review): fragment of LiveIntervals::addIntervalsForSpills. The
// function signature and many interior lines are elided from this excerpt
// (the leading number on each line is the original file's line number);
// comments below describe only what the visible lines establish.
1956 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Only attempt per-basic-block splitting when enabled and the interval
// actually spans more than one MBB; honor the -split-limit cap.
1960 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1961 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
// Phase 1: walk every value number (val#) of the interval and decide
// whether its def can be rematerialized instead of reloaded from a slot.
1965 bool NeedStackSlot = false;
1966 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1968 const VNInfo *VNI = *i;
1969 unsigned VN = VNI->id;
1970 if (VNI->isUnused())
1971 continue; // Dead val#.
1972 // Is the def for the val# rematerializable?
// Only val#s with an accurate def index map back to a defining instruction.
1973 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1974 ? getInstructionFromIndex(VNI->def) : 0;
1976 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1977 // Remember how to remat the def of this val#.
1978 ReMatOrigDefs[VN] = ReMatDefMI;
1979 // Original def may be modified so we have to make a copy here.
1980 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1981 ClonedMIs.push_back(Clone);
1982 ReMatDefs[VN] = Clone;
// A rematerialized def can normally be deleted, except when the val# is
// live out through a PHI (hasPHIKill) — then the original must be kept.
1984 bool CanDelete = true;
1985 if (VNI->hasPHIKill()) {
1986 // A kill is a phi node, not all of its uses can be rematerialized.
1987 // It must not be deleted.
1989 // Need a stack slot if there is any live range where uses cannot be
1991 NeedStackSlot = true;
1994 ReMatDelete.set(VN);
1996 // Need a stack slot if there is any live range where uses cannot be
1998 NeedStackSlot = true;
// Phase 2: assign (or reuse) the single stack slot for this interval.
2002 // One stack slot per live interval.
2003 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2004 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2005 Slot = vrm.assignVirt2StackSlot(li.reg);
2007 // This case only occurs when the prealloc splitter has already assigned
2008 // a stack slot to this vreg.
2010 Slot = vrm.getStackSlot(li.reg);
2013 // Create new intervals and rewrite defs and uses.
// Phase 3: per live range, look up the remat decision for its val# and
// hand everything to rewriteInstructionsForSpills, which records spill /
// restore candidates in SpillMBBs/SpillIdxes and RestoreMBBs/RestoreIdxes.
2014 for (LiveInterval::Ranges::const_iterator
2015 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2016 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2017 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2018 bool DefIsReMat = ReMatDefMI != NULL;
2019 bool CanDelete = ReMatDelete[I->valno->id];
2021 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2022 bool isLoad = isLoadSS ||
2023 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2024 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2025 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2026 CanDelete, vrm, rc, ReMatIds, loopInfo,
2027 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2028 MBBVRegsMap, NewLIs);
2031 // Insert spills / restores if we are splitting.
2033 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Phase 4: emit the recorded spill points, folding the store into the
// defining instruction where possible (tryFoldMemoryOperand).
2037 SmallPtrSet<LiveInterval*, 4> AddedKill;
2038 SmallVector<unsigned, 2> Ops;
2039 if (NeedStackSlot) {
2040 int Id = SpillMBBs.find_first();
2042 std::vector<SRInfo> &spills = SpillIdxes[Id];
2043 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2044 int index = spills[i].index;
2045 unsigned VReg = spills[i].vreg;
2046 LiveInterval &nI = getOrCreateInterval(VReg);
2047 bool isReMat = vrm.isReMaterialized(VReg);
2048 MachineInstr *MI = getInstructionFromIndex(index);
2049 bool CanFold = false;
2050 bool FoundUse = false;
// Scan the instruction's operands for VReg to decide foldability
// (some of the operand-classification lines are elided here).
2052 if (spills[i].canFold) {
2054 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2055 MachineOperand &MO = MI->getOperand(j);
2056 if (!MO.isReg() || MO.getReg() != VReg)
2063 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2064 RestoreMBBs, RestoreIdxes))) {
2065 // MI has two-address uses of the same register. If the use
2066 // isn't the first and only use in the BB, then we can't fold
2067 // it. FIXME: Move this to rewriteInstructionsForSpills.
2074 // Fold the store into the def if possible.
2075 bool Folded = false;
2076 if (CanFold && !Ops.empty()) {
2077 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2080 // Also folded uses, do not issue a load.
2081 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
// Trim the new interval around the folded instruction's slots.
2082 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2084 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2088 // Otherwise tell the spiller to issue a spill.
// The spill kills the value iff the last range of nI ends at this
// instruction's store slot.
2090 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2091 bool isKill = LR->end == getStoreIndex(index);
2092 if (!MI->registerDefIsDead(nI.reg))
2093 // No need to spill a dead def.
2094 vrm.addSpillPoint(VReg, isKill, MI);
2096 AddedKill.insert(&nI);
2099 Id = SpillMBBs.find_next(Id);
// Phase 5: emit the recorded restore points, preferring to fold the
// reload (or a rematerializable load) into the using instruction.
2103 int Id = RestoreMBBs.find_first();
2105 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2106 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2107 int index = restores[i].index;
2110 unsigned VReg = restores[i].vreg;
2111 LiveInterval &nI = getOrCreateInterval(VReg);
2112 bool isReMat = vrm.isReMaterialized(VReg);
2113 MachineInstr *MI = getInstructionFromIndex(index);
2114 bool CanFold = false;
2116 if (restores[i].canFold) {
2118 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2119 MachineOperand &MO = MI->getOperand(j);
2120 if (!MO.isReg() || MO.getReg() != VReg)
2124 // If this restore were to be folded, it would have been folded
2133 // Fold the load into the use if possible.
2134 bool Folded = false;
2135 if (CanFold && !Ops.empty()) {
// Non-remat case: try folding a plain stack-slot reload.
2137 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Remat case: if the rematerializable def is itself a (stack) load,
// try folding that instead of issuing a separate reload.
2139 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2141 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2142 // If the rematerializable def is a load, also try to fold it.
2143 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2144 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2145 Ops, isLoadSS, LdSlot, VReg);
2147 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2149 // Re-matting an instruction with virtual register use. Add the
2150 // register as an implicit use on the use MI and update the register
2151 // interval's spill weight to HUGE_VALF to prevent it from being
2153 LiveInterval &ImpLi = getInterval(ImpUse);
2154 ImpLi.weight = HUGE_VALF;
2155 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2160 // If folding is not possible / failed, then tell the spiller to issue a
2161 // load / rematerialization for us.
2163 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2165 vrm.addRestorePoint(VReg, MI);
2167 Id = RestoreMBBs.find_next(Id);
2170 // Finalize intervals: add kills, finalize spill weights, and filter out
// Phase 6: normalize spill weights by approximate instruction count and
// mark the last use of each new interval as a kill unless it is tied to
// a def operand (two-address) or a spill already added the kill.
2172 std::vector<LiveInterval*> RetNewLIs;
2173 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2174 LiveInterval *LI = NewLIs[i];
2176 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2177 if (!AddedKill.count(LI)) {
2178 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2179 unsigned LastUseIdx = getBaseIndex(LR->end);
2180 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2181 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2182 assert(UseIdx != -1);
2183 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2184 LastUse->getOperand(UseIdx).setIsKill();
2185 vrm.addKillPoint(LI->reg, LastUseIdx);
2188 RetNewLIs.push_back(LI);
// Re-handle spilled IMPLICIT_DEFs against the finalized interval list.
2192 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2196 /// hasAllocatableSuperReg - Return true if the specified physical register has
2197 /// any super register that's allocatable.
// NOTE(review): the function tail (its return statements) is elided from
// this excerpt; only the scan over the super-register list is visible.
2198 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the zero-terminated super-register list; a super-register counts
// only if it is allocatable AND has a live interval computed for it.
2199 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2200 if (allocatableRegs_[*AS] && hasInterval(*AS))
2205 /// getRepresentativeReg - Find the largest super register of the specified
2206 /// physical register.
// NOTE(review): the loop body tail and final return are elided from this
// excerpt; BestReg is presumably updated and returned — confirm against
// the full source.
2207 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2208 // Find the largest super-register that is allocatable.
2209 unsigned BestReg = Reg;
2210 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2211 unsigned SuperReg = *AS;
// A super-register is "largest" when it has an interval but no
// allocatable super-register of its own above it.
2212 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2220 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2221 /// specified interval that conflicts with the specified physical register.
// NOTE(review): the lines that increment NumConflicts (and dedup repeated
// operands of one instruction, if any) are elided from this excerpt.
2222 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2223 unsigned PhysReg) const {
2224 unsigned NumConflicts = 0;
// Compare against the interval of the representative (largest) super-reg
// so aliasing sub-registers are covered by a single interval query.
2225 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of li.reg in the function.
2226 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2227 E = mri_->reg_end(); I != E; ++I) {
2228 MachineOperand &O = I.getOperand();
2229 MachineInstr *MI = O.getParent();
2230 unsigned Index = getInstructionIndex(MI);
// A conflict exists when the phys-reg interval is live at this
// instruction's index.
2231 if (pli.liveAt(Index))
2234 return NumConflicts;
2237 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2238 /// around all defs and uses of the specified interval. Return true if it
2239 /// was able to cut its interval.
// NOTE(review): several control-flow lines (continue statements, the else
// branch, return paths) are elided from this excerpt.
2240 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2241 unsigned PhysReg, VirtRegMap &vrm) {
// Operate on the representative (largest) super-register so a single
// emergency spill covers all aliases.
2242 unsigned SpillReg = getRepresentativeReg(PhysReg);
2244 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2245 // If there are registers which alias PhysReg, but which are not a
2246 // sub-register of the chosen representative super register. Assert
2247 // since we can't handle it yet.
2248 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2249 tri_->isSuperRegister(*AS, SpillReg));
2252 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs dedups instructions reached via multiple operands of li.reg.
2253 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2254 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2255 E = mri_->reg_end(); I != E; ++I) {
2256 MachineOperand &O = I.getOperand();
2257 MachineInstr *MI = O.getParent();
2258 if (SeenMIs.count(MI))
2261 unsigned Index = getInstructionIndex(MI);
// Only instructions where the phys-reg interval is live need a spill.
2262 if (pli.liveAt(Index)) {
2263 vrm.addEmergencySpill(SpillReg, MI);
// Carve the instruction's [load, store] slot window out of the
// phys-reg interval, if the window sits inside one live range.
2264 unsigned StartIdx = getLoadIndex(Index);
2265 unsigned EndIdx = getStoreIndex(Index)+1;
2266 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2267 pli.removeRange(StartIdx, EndIdx);
// Failure path: the interval can't be cut here — report allocation
// failure; inline asm with bad constraints is the common culprit.
2270 cerr << "Ran out of registers during register allocation!\n";
2271 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2272 cerr << "Please check your inline asm statement for invalid "
2273 << "constraints:\n";
2274 MI->print(cerr.stream(), tm_);
// Also trim the same window from every live sub-register interval.
2278 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2279 if (!hasInterval(*AS))
2281 LiveInterval &spli = getInterval(*AS);
2282 if (spli.liveAt(Index))
2283 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// addLiveRangeToEndOfBlock - Create (or extend) the interval for 'reg'
// with a new value defined at 'startInst' and live out to the end of
// startInst's basic block, marked as killed by a PHI.
// NOTE(review): the function tail (presumably 'return LR;' and the closing
// brace) is elided from this excerpt — confirm against the full source.
2290 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2291 MachineInstr* startInst) {
2292 LiveInterval& Interval = getOrCreateInterval(reg);
// New value number defined at startInst's DEF slot.
2293 VNInfo* VN = Interval.getNextValue(
2294 getInstructionIndex(startInst) + InstrSlots::DEF,
2295 startInst, true, getVNInfoAllocator());
// The value is consumed by a PHI: record the block-end index as its kill.
2296 VN->setHasPHIKill(true);
2297 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
// Range covers [def slot, block end] (end index is exclusive, hence +1).
2298 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2299 getMBBEndIdx(startInst->getParent()) + 1, VN);
2300 Interval.addRange(LR);