//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
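// Editor's note: the index arithmetic throughout this file assumes the
// InstrSlots scheme from LiveIntervalAnalysis.h at this revision: every
// instruction owns InstrSlots::NUM (4) consecutive index slots, named
// LOAD, USE, DEF and STORE, so that, e.g.,
//   getBaseIndex(i) == i - i % InstrSlots::NUM
//   getDefIndex(i)  == getBaseIndex(i) + InstrSlots::DEF
// An index whose offset is InstrSlots::LOAD and that maps to no
// instruction denotes a basic block boundary rather than a slot of a
// particular instruction.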
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
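// Editor's note: PHI elimination and two-address lowering are required
// (not merely preserved) so that they run before this analysis; interval
// construction below assumes PHI nodes have already been turned into
// copies (see the "phi elimination" cases in handleVirtualRegisterDef)
// and that tied def/use operands are explicit.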
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
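
  // Editor's illustration of the numbering above: a block holding
  // instruction A (one def) followed by instruction B (two defs) is
  // laid out as
  //   [ 0, 4)  empty slot at block start
  //   [ 4, 8)  A            (LOAD=4, USE=5, DEF=6, STORE=7)
  //   [ 8,12)  empty slot for A's def
  //   [12,16)  B
  //   [16,24)  two empty slots for B's defs
  // giving the MBB2IdxMap entry (0, 23) for the block.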
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                   std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                      (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI)
    i2miMap_[MI->second] = MI->first;
}
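
// Editor's note (assumption about InstrSlots::scale): if it multiplies an
// index by a positive factor, then scaling by N leaves N-1 unused slots
// between formerly adjacent indices, so a pass that wants to insert
// instructions between existing ones can claim those slots without
// renumbering the whole function.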
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
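    // Editor's illustration: with the defining instruction at base index 4
    // and the killing instruction at base index 12, defIndex = 6 (the DEF
    // slot) and killIdx = getUseIndex(12) + 1 = 14, yielding the
    // intra-block range [6, 14).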
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that
        // this value number isn't actually defined by an instruction,
        // weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
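  // Editor's illustration: a def at base index 8 gives start = 10 (DEF
  // slot); if the kill sits at base index 16, end = getUseIndex(16) + 1
  // = 18, so the physreg interval is [10, 18).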
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }
  // The only case we should have a dead physreg here without a killing or
  // defining instruction where we know it's dead is if it is live-in to the
  // function and never used. Another possible case is the implicit use of
  // the physical register has been deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

exit:
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
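
// Editor's note: ~0U as a VNInfo def index is the sentinel for a value
// with no defining instruction in the function (here, a block live-in);
// computeNumbering above deliberately skips remapping such sentinel defs.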
/// computeIntervals - Compute the live intervals for virtual registers.
/// For some ordering of the machine instructions [1,N], a live interval is
/// an interval [i, j), where 1 <= i <= j < N, for which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
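
// Editor's note: physical-register intervals start with an effectively
// infinite spill weight because they can never be spilled; the same
// HUGE_VALF convention marks "do not spill again" intervals elsewhere in
// this file.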
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one register use other than li.reg, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For the use, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0U)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
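
// Editor's note: on a successful fold the new instruction fmi simply takes
// over the old instruction's index (the mi2iMap_ / i2miMap_ surgery above),
// so all previously computed live range endpoints remain valid.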
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
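
// Editor's note: rewriteInstructionsForSpills below builds one RewriteInfo
// per (instruction, operand) reference to the spilled vreg, sorts the list
// by instruction index, and then walks it in program order, coalescing the
// consecutive entries that refer to the same MachineInstr.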
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
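
// Editor's note: eraseRestoreInfo invalidates an entry in place by setting
// its index to -1 instead of erasing it from the vector, so the positions
// of the remaining SRInfo entries stay stable for code that indexes into
// the list.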
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      nI.print(DOUT, tri_);
      DOUT << '\n';
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

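// Note the fast path above trades code quality for compile time: each
// instruction touching the register gets, at best, a folded memory operand
// and, otherwise, a reload before it and/or a store after it, all through a
// single shared stack slot, with no rematerialization or splitting.
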
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further and is
  // guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

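  // From here on the interval being spilled has never been split. Decide
  // whether to split it at basic block boundaries before rewriting.
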
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

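  // At this point, for each val#: ReMatOrigDefs holds the original
  // rematerializable def (if any), ReMatDefs holds a private clone that the
  // rewriting below may use, and ReMatDelete marks val#s whose defs can be
  // deleted once all their uses are rematerialized.
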
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else {
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
    }
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

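  // Now insert restores in the blocks that need them. Entries tombstoned
  // with index -1 (restores already folded away together with a store above)
  // are skipped.
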
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

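  // Any new interval not marked killed by an inserted spill must still end
  // at a real use; find that last use below and flag it as a kill so later
  // passes see accurate kill information.
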
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

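// For example, on X86 getRepresentativeReg(AL) walks AL's super-registers
// (AX, EAX, and RAX in 64-bit mode) looking for one that has a live interval
// but no allocatable super-register of its own, and returns it.
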
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}

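/// addLiveRangeToEndOfBlock - Create a live range for the given register
/// that runs from the def slot of startInst to the end of its basic block,
/// marking the value as killed by a PHI at the block boundary.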
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}