1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
72 AU.addPreservedID(PHIEliminationID);
73 AU.addRequiredID(PHIEliminationID);
76 AU.addRequiredID(TwoAddressInstructionPassID);
77 MachineFunctionPass::getAnalysisUsage(AU);
80 void LiveIntervals::releaseMemory() {
81 // Free the live intervals themselves.
82 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
83 E = r2iMap_.end(); I != E; ++I)
91 // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
92 VNInfoAllocator.Reset();
93 while (!ClonedMIs.empty()) {
94 MachineInstr *MI = ClonedMIs.back();
96 mf_->DeleteMachineInstr(MI);
100 void LiveIntervals::computeNumbering() {
101 Index2MiMap OldI2MI = i2miMap_;
102 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
111 // Number MachineInstrs and MachineBasicBlocks.
112 // Initialize MBB indexes to a sentinal.
113 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
115 unsigned MIIndex = 0;
116 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
118 unsigned StartIdx = MIIndex;
120 // Insert an empty slot at the beginning of each block.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
124 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
126 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
127 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Register allocate the whole function
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
261 numIntervals += getNumIntervals();
267 /// print - Implement the dump method.
268 void LiveIntervals::print(std::ostream &O, const Module* ) const {
269 O << "********** INTERVALS **********\n";
270 for (const_iterator I = begin(), E = end(); I != E; ++I) {
271 I->second->print(O, tri_);
275 O << "********** MACHINEINSTRS **********\n";
276 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
277 mbbi != mbbe; ++mbbi) {
278 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
279 for (MachineBasicBlock::iterator mii = mbbi->begin(),
280 mie = mbbi->end(); mii != mie; ++mii) {
281 O << getInstructionIndex(mii) << '\t' << *mii;
286 /// conflictsWithPhysRegDef - Returns true if the specified register
287 /// is defined during the duration of the specified interval.
288 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
289 VirtRegMap &vrm, unsigned reg) {
290 for (LiveInterval::Ranges::const_iterator
291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
292 for (unsigned index = getBaseIndex(I->start),
293 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
294 index += InstrSlots::NUM) {
295 // skip deleted instructions
296 while (index != end && !getInstructionFromIndex(index))
297 index += InstrSlots::NUM;
298 if (index == end) break;
300 MachineInstr *MI = getInstructionFromIndex(index);
301 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
302 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
303 if (SrcReg == li.reg || DstReg == li.reg)
305 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
306 MachineOperand& mop = MI->getOperand(i);
309 unsigned PhysReg = mop.getReg();
310 if (PhysReg == 0 || PhysReg == li.reg)
312 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
313 if (!vrm.hasPhys(PhysReg))
315 PhysReg = vrm.getPhys(PhysReg);
317 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
326 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
327 /// it can check use as well.
328 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
329 unsigned Reg, bool CheckUse,
330 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
331 for (LiveInterval::Ranges::const_iterator
332 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
333 for (unsigned index = getBaseIndex(I->start),
334 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
335 index += InstrSlots::NUM) {
336 // Skip deleted instructions.
337 MachineInstr *MI = 0;
338 while (index != end) {
339 MI = getInstructionFromIndex(index);
342 index += InstrSlots::NUM;
344 if (index == end) break;
346 if (JoinedCopies.count(MI))
348 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
349 MachineOperand& MO = MI->getOperand(i);
352 if (MO.isUse() && !CheckUse)
354 unsigned PhysReg = MO.getReg();
355 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
357 if (tri_->isSubRegister(Reg, PhysReg))
367 void LiveIntervals::printRegName(unsigned reg) const {
368 if (TargetRegisterInfo::isPhysicalRegister(reg))
369 cerr << tri_->getName(reg);
371 cerr << "%reg" << reg;
374 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
375 MachineBasicBlock::iterator mi,
376 unsigned MIIdx, MachineOperand& MO,
378 LiveInterval &interval) {
379 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
380 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
382 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
383 DOUT << "is a implicit_def\n";
387 // Virtual registers may be defined multiple times (due to phi
388 // elimination and 2-addr elimination). Much of what we do only has to be
389 // done once for the vreg. We use an empty interval to detect the first
390 // time we see a vreg.
391 if (interval.empty()) {
392 // Get the Idx of the defining instructions.
393 unsigned defIndex = getDefIndex(MIIdx);
394 // Earlyclobbers move back one.
395 if (MO.isEarlyClobber())
396 defIndex = getUseIndex(MIIdx);
398 MachineInstr *CopyMI = NULL;
399 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
400 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
401 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
402 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
404 // Earlyclobbers move back one.
405 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
407 assert(ValNo->id == 0 && "First value in interval is not 0?");
409 // Loop over all of the blocks that the vreg is defined in. There are
410 // two cases we have to handle here. The most common case is a vreg
411 // whose lifetime is contained within a basic block. In this case there
412 // will be a single kill, in MBB, which comes after the definition.
413 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
414 // FIXME: what about dead vars?
416 if (vi.Kills[0] != mi)
417 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
419 killIdx = defIndex+1;
421 // If the kill happens after the definition, we have an intra-block
423 if (killIdx > defIndex) {
424 assert(vi.AliveBlocks.none() &&
425 "Shouldn't be alive across any blocks!");
426 LiveRange LR(defIndex, killIdx, ValNo);
427 interval.addRange(LR);
428 DOUT << " +" << LR << "\n";
429 interval.addKill(ValNo, killIdx);
434 // The other case we handle is when a virtual register lives to the end
435 // of the defining block, potentially live across some blocks, then is
436 // live into some number of blocks, but gets killed. Start by adding a
437 // range that goes from this definition to the end of the defining block.
438 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
439 DOUT << " +" << NewLR;
440 interval.addRange(NewLR);
442 // Iterate over all of the blocks that the variable is completely
443 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
445 for (int i = vi.AliveBlocks.find_first(); i != -1;
446 i = vi.AliveBlocks.find_next(i)) {
447 LiveRange LR(getMBBStartIdx(i),
448 getMBBEndIdx(i)+1, // MBB ends at -1.
450 interval.addRange(LR);
454 // Finally, this virtual register is live from the start of any killing
455 // block to the 'use' slot of the killing instruction.
456 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
457 MachineInstr *Kill = vi.Kills[i];
458 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
459 LiveRange LR(getMBBStartIdx(Kill->getParent()),
461 interval.addRange(LR);
462 interval.addKill(ValNo, killIdx);
467 // If this is the second time we see a virtual register definition, it
468 // must be due to phi elimination or two addr elimination. If this is
469 // the result of two address elimination, then the vreg is one of the
470 // def-and-use register operand.
471 if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
472 // If this is a two-address definition, then we have already processed
473 // the live range. The only problem is that we didn't realize there
474 // are actually two values in the live interval. Because of this we
475 // need to take the LiveRegion that defines this register and split it
477 assert(interval.containsOneValue());
478 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
479 unsigned RedefIndex = getDefIndex(MIIdx);
480 // It cannot be an early clobber MO.
481 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
483 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
484 VNInfo *OldValNo = OldLR->valno;
486 // Delete the initial value, which should be short and continuous,
487 // because the 2-addr copy must be in the same MBB as the redef.
488 interval.removeRange(DefIndex, RedefIndex);
490 // Two-address vregs should always only be redefined once. This means
491 // that at this point, there should be exactly one value number in it.
492 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
494 // The new value number (#1) is defined by the instruction we claimed
496 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
499 // Value#0 is now defined by the 2-addr instruction.
500 OldValNo->def = RedefIndex;
503 // Add the new live interval which replaces the range for the input copy.
504 LiveRange LR(DefIndex, RedefIndex, ValNo);
505 DOUT << " replace range with " << LR;
506 interval.addRange(LR);
507 interval.addKill(ValNo, RedefIndex);
509 // If this redefinition is dead, we need to add a dummy unit live
510 // range covering the def slot.
512 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
515 interval.print(DOUT, tri_);
518 // Otherwise, this must be because of phi elimination. If this is the
519 // first redefinition of the vreg that we have seen, go back and change
520 // the live range in the PHI block to be a different value number.
521 if (interval.containsOneValue()) {
522 assert(vi.Kills.size() == 1 &&
523 "PHI elimination vreg should have one kill, the PHI itself!");
525 // Remove the old range that we now know has an incorrect number.
526 VNInfo *VNI = interval.getValNumInfo(0);
527 MachineInstr *Killer = vi.Kills[0];
528 unsigned Start = getMBBStartIdx(Killer->getParent());
529 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
530 DOUT << " Removing [" << Start << "," << End << "] from: ";
531 interval.print(DOUT, tri_); DOUT << "\n";
532 interval.removeRange(Start, End);
533 VNI->hasPHIKill = true;
534 DOUT << " RESULT: "; interval.print(DOUT, tri_);
536 // Replace the interval with one of a NEW value number. Note that this
537 // value number isn't actually defined by an instruction, weird huh? :)
538 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
539 DOUT << " replace range with " << LR;
540 interval.addRange(LR);
541 interval.addKill(LR.valno, End);
542 DOUT << " RESULT: "; interval.print(DOUT, tri_);
545 // In the case of PHI elimination, each variable definition is only
546 // live until the end of the block. We've already taken care of the
547 // rest of the live range.
548 unsigned defIndex = getDefIndex(MIIdx);
549 // It cannot be an early clobber MO.
550 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
553 MachineInstr *CopyMI = NULL;
554 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
555 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
556 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
557 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
559 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
561 unsigned killIndex = getMBBEndIdx(mbb) + 1;
562 LiveRange LR(defIndex, killIndex, ValNo);
563 interval.addRange(LR);
564 interval.addKill(ValNo, killIndex);
565 ValNo->hasPHIKill = true;
573 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
574 MachineBasicBlock::iterator mi,
577 LiveInterval &interval,
578 MachineInstr *CopyMI) {
579 // A physical register cannot be live across basic block, so its
580 // lifetime must end somewhere in its defining basic block.
581 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
583 unsigned baseIndex = MIIdx;
584 unsigned start = getDefIndex(baseIndex);
585 // Earlyclobbers move back one.
586 if (MO.isEarlyClobber())
587 start = getUseIndex(MIIdx);
588 unsigned end = start;
590 // If it is not used after definition, it is considered dead at
591 // the instruction defining it. Hence its interval is:
592 // [defSlot(def), defSlot(def)+1)
599 // If it is not dead on definition, it must be killed by a
600 // subsequent instruction. Hence its interval is:
601 // [defSlot(def), useSlot(kill)+1)
602 baseIndex += InstrSlots::NUM;
603 while (++mi != MBB->end()) {
604 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
605 getInstructionFromIndex(baseIndex) == 0)
606 baseIndex += InstrSlots::NUM;
607 if (mi->killsRegister(interval.reg, tri_)) {
609 end = getUseIndex(baseIndex) + 1;
611 } else if (mi->modifiesRegister(interval.reg, tri_)) {
612 // Another instruction redefines the register before it is ever read.
613 // Then the register is essentially dead at the instruction that defines
614 // it. Hence its interval is:
615 // [defSlot(def), defSlot(def)+1)
621 baseIndex += InstrSlots::NUM;
624 // The only case we should have a dead physreg here without a killing or
625 // instruction where we know it's dead is if it is live-in to the function
627 assert(!CopyMI && "physreg was not killed in defining block!");
631 assert(start < end && "did not find end of interval?");
633 // Already exists? Extend old live interval.
634 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
635 bool Extend = OldLR != interval.end();
636 VNInfo *ValNo = Extend
637 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
638 if (MO.isEarlyClobber() && Extend)
639 ValNo->redefByEC = true;
640 LiveRange LR(start, end, ValNo);
641 interval.addRange(LR);
642 interval.addKill(LR.valno, end);
643 DOUT << " +" << LR << '\n';
646 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
647 MachineBasicBlock::iterator MI,
651 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
652 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
653 getOrCreateInterval(MO.getReg()));
654 else if (allocatableRegs_[MO.getReg()]) {
655 MachineInstr *CopyMI = NULL;
656 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
657 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
658 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
659 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
661 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
662 getOrCreateInterval(MO.getReg()), CopyMI);
663 // Def of a register also defines its sub-registers.
664 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
665 // If MI also modifies the sub-register explicitly, avoid processing it
666 // more than once. Do not pass in TRI here so it checks for exact match.
667 if (!MI->modifiesRegister(*AS))
668 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
669 getOrCreateInterval(*AS), 0);
673 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
675 LiveInterval &interval, bool isAlias) {
676 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
678 // Look for kills, if it reaches a def before it's killed, then it shouldn't
679 // be considered a livein.
680 MachineBasicBlock::iterator mi = MBB->begin();
681 unsigned baseIndex = MIIdx;
682 unsigned start = baseIndex;
683 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
684 getInstructionFromIndex(baseIndex) == 0)
685 baseIndex += InstrSlots::NUM;
686 unsigned end = baseIndex;
688 while (mi != MBB->end()) {
689 if (mi->killsRegister(interval.reg, tri_)) {
691 end = getUseIndex(baseIndex) + 1;
693 } else if (mi->modifiesRegister(interval.reg, tri_)) {
694 // Another instruction redefines the register before it is ever read.
695 // Then the register is essentially dead at the instruction that defines
696 // it. Hence its interval is:
697 // [defSlot(def), defSlot(def)+1)
699 end = getDefIndex(start) + 1;
703 baseIndex += InstrSlots::NUM;
704 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
705 getInstructionFromIndex(baseIndex) == 0)
706 baseIndex += InstrSlots::NUM;
711 // Live-in register might not be used at all.
715 end = getDefIndex(MIIdx) + 1;
717 DOUT << " live through";
722 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
723 interval.addRange(LR);
724 interval.addKill(LR.valno, end);
725 DOUT << " +" << LR << '\n';
728 /// computeIntervals - computes the live intervals for virtual
729 /// registers. for some ordering of the machine instructions [1,N] a
730 /// live interval is an interval [i, j) where 1 <= i <= j < N for
731 /// which a variable is live
732 void LiveIntervals::computeIntervals() {
734 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
735 << "********** Function: "
736 << ((Value*)mf_->getFunction())->getName() << '\n';
738 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
740 MachineBasicBlock *MBB = MBBI;
741 // Track the index of the current machine instr.
742 unsigned MIIndex = getMBBStartIdx(MBB);
743 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
745 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
747 // Create intervals for live-ins to this BB first.
748 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
749 LE = MBB->livein_end(); LI != LE; ++LI) {
750 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
751 // Multiple live-ins can alias the same register.
752 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
753 if (!hasInterval(*AS))
754 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
758 // Skip over empty initial indices.
759 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
760 getInstructionFromIndex(MIIndex) == 0)
761 MIIndex += InstrSlots::NUM;
763 for (; MI != miEnd; ++MI) {
764 DOUT << MIIndex << "\t" << *MI;
767 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
768 MachineOperand &MO = MI->getOperand(i);
769 // handle register defs - build intervals
770 if (MO.isReg() && MO.getReg() && MO.isDef()) {
771 handleRegisterDef(MBB, MI, MIIndex, MO, i);
775 // Skip over the empty slots after each instruction.
776 unsigned Slots = MI->getDesc().getNumDefs();
779 MIIndex += InstrSlots::NUM * Slots;
781 // Skip over empty indices.
782 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
783 getInstructionFromIndex(MIIndex) == 0)
784 MIIndex += InstrSlots::NUM;
789 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
790 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
791 std::vector<IdxMBBPair>::const_iterator I =
792 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
795 while (I != Idx2MBBMap.end()) {
798 MBBs.push_back(I->second);
805 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
806 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
807 std::vector<IdxMBBPair>::const_iterator I =
808 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
811 while (I != Idx2MBBMap.end()) {
814 MachineBasicBlock *MBB = I->second;
815 if (getMBBEndIdx(MBB) > End)
817 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
818 SE = MBB->succ_end(); SI != SE; ++SI)
826 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
827 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
829 return new LiveInterval(reg, Weight);
832 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
833 /// copy field and returns the source register that defines it.
834 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
838 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
839 // If it's extracting out of a physical register, return the sub-register.
840 unsigned Reg = VNI->copy->getOperand(1).getReg();
841 if (TargetRegisterInfo::isPhysicalRegister(Reg))
842 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
844 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
845 return VNI->copy->getOperand(2).getReg();
847 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
848 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
850 assert(0 && "Unrecognized copy instruction!");
854 //===----------------------------------------------------------------------===//
855 // Register allocator hooks.
858 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
859 /// allow one) virtual register operand, then its uses are implicitly using
860 /// the register. Returns the virtual register.
861 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
862 MachineInstr *MI) const {
864 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
865 MachineOperand &MO = MI->getOperand(i);
866 if (!MO.isReg() || !MO.isUse())
868 unsigned Reg = MO.getReg();
869 if (Reg == 0 || Reg == li.reg)
871 // FIXME: For now, only remat MI with at most one register operand.
873 "Can't rematerialize instruction with multiple register operand!");
882 /// isValNoAvailableAt - Return true if the val# of the specified interval
883 /// which reaches the given instruction also reaches the specified use index.
884 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
885 unsigned UseIdx) const {
886 unsigned Index = getInstructionIndex(MI);
887 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
888 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
889 return UI != li.end() && UI->valno == ValNo;
892 /// isReMaterializable - Returns true if the definition MI of the specified
893 /// val# of the specified interval is re-materializable.
894 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
895 const VNInfo *ValNo, MachineInstr *MI,
896 SmallVectorImpl<LiveInterval*> &SpillIs,
901 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
905 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
906 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
907 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
908 // this but remember this is not safe to fold into a two-address
910 // This is a load from fixed stack slot. It can be rematerialized.
913 // If the target-specific rules don't identify an instruction as
914 // being trivially rematerializable, use some target-independent
916 if (!MI->getDesc().isRematerializable() ||
917 !tii_->isTriviallyReMaterializable(MI)) {
918 if (!EnableAggressiveRemat)
921 // If the instruction accesses memory but the memoperands have been lost,
922 // we can't analyze it.
923 const TargetInstrDesc &TID = MI->getDesc();
924 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
927 // Avoid instructions obviously unsafe for remat.
928 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
931 // If the instruction accesses memory and the memory could be non-constant,
932 // assume the instruction is not rematerializable.
933 for (std::list<MachineMemOperand>::const_iterator
934 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
935 const MachineMemOperand &MMO = *I;
936 if (MMO.isVolatile() || MMO.isStore())
938 const Value *V = MMO.getValue();
941 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
942 if (!PSV->isConstant(mf_->getFrameInfo()))
944 } else if (!aa_->pointsToConstantMemory(V))
948 // If any of the registers accessed are non-constant, conservatively assume
949 // the instruction is not rematerializable.
951 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
952 const MachineOperand &MO = MI->getOperand(i);
954 unsigned Reg = MO.getReg();
957 if (TargetRegisterInfo::isPhysicalRegister(Reg))
960 // Only allow one def, and that in the first operand.
961 if (MO.isDef() != (i == 0))
964 // Only allow constant-valued registers.
965 bool IsLiveIn = mri_->isLiveIn(Reg);
966 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
969 // For the def, it should be the only def of that register.
970 if (MO.isDef() && (next(I) != E || IsLiveIn))
974 // Only allow one use other register use, as that's all the
975 // remat mechanisms support currently.
979 else if (Reg != ImpUse)
982 // For the use, there should be only one associated def.
983 if (I != E && (next(I) != E || IsLiveIn))
990 unsigned ImpUse = getReMatImplicitUse(li, MI);
992 const LiveInterval &ImpLi = getInterval(ImpUse);
993 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
994 re = mri_->use_end(); ri != re; ++ri) {
995 MachineInstr *UseMI = &*ri;
996 unsigned UseIdx = getInstructionIndex(UseMI);
997 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
999 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1003 // If a register operand of the re-materialized instruction is going to
1004 // be spilled next, then it's not legal to re-materialize this instruction.
1005 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1006 if (ImpUse == SpillIs[i]->reg)
1012 /// isReMaterializable - Returns true if the definition MI of the specified
1013 /// val# of the specified interval is re-materializable.
1014 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1015 const VNInfo *ValNo, MachineInstr *MI) {
1016 SmallVector<LiveInterval*, 4> Dummy1;
1018 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1021 /// isReMaterializable - Returns true if every definition of MI of every
1022 /// val# of the specified interval is re-materializable.
1023 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1024 SmallVectorImpl<LiveInterval*> &SpillIs,
1027 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1029 const VNInfo *VNI = *i;
1030 unsigned DefIdx = VNI->def;
1032 continue; // Dead val#.
1033 // Is the def for the val# rematerializable?
1036 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
1037 bool DefIsLoad = false;
1039 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1041 isLoad |= DefIsLoad;
1046 /// FilterFoldedOps - Filter out two-address use operands. Return
1047 /// true if it finds any issue with the operands that ought to prevent
1049 static bool FilterFoldedOps(MachineInstr *MI,
1050 SmallVector<unsigned, 2> &Ops,
1052 SmallVector<unsigned, 2> &FoldOps) {
1053 const TargetInstrDesc &TID = MI->getDesc();
1056 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1057 unsigned OpIdx = Ops[i];
1058 MachineOperand &MO = MI->getOperand(OpIdx);
1059 // FIXME: fold subreg use.
1063 MRInfo |= (unsigned)VirtRegMap::isMod;
1065 // Filter out two-address use operand(s).
1066 if (!MO.isImplicit() &&
1067 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1068 MRInfo = VirtRegMap::isModRef;
1071 MRInfo |= (unsigned)VirtRegMap::isRef;
1073 FoldOps.push_back(OpIdx);
1079 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1080 /// slot / to reg or any rematerialized load into ith operand of specified
1081 /// MI. If it is successul, MI is updated with the newly created MI and
1083 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1084 VirtRegMap &vrm, MachineInstr *DefMI,
1086 SmallVector<unsigned, 2> &Ops,
1087 bool isSS, int Slot, unsigned Reg) {
1088 // If it is an implicit def instruction, just delete it.
1089 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1090 RemoveMachineInstrFromMaps(MI);
1091 vrm.RemoveMachineInstrFromMaps(MI);
1092 MI->eraseFromParent();
1097 // Filter the list of operand indexes that are to be folded. Abort if
1098 // any operand will prevent folding.
1099 unsigned MRInfo = 0;
1100 SmallVector<unsigned, 2> FoldOps;
1101 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1104 // The only time it's safe to fold into a two address instruction is when
1105 // it's folding reload and spill from / into a spill stack slot.
1106 if (DefMI && (MRInfo & VirtRegMap::isMod))
1109 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1110 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1112 // Remember this instruction uses the spill slot.
1113 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1115 // Attempt to fold the memory reference into the instruction. If
1116 // we can do this, we don't need to insert spill code.
1117 MachineBasicBlock &MBB = *MI->getParent();
1118 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1119 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1120 vrm.transferSpillPts(MI, fmi);
1121 vrm.transferRestorePts(MI, fmi);
1122 vrm.transferEmergencySpills(MI, fmi);
1124 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1125 mi2iMap_[fmi] = InstrIdx;
1126 MI = MBB.insert(MBB.erase(MI), fmi);
1133 /// canFoldMemoryOperand - Returns true if the specified load / store
1134 /// folding is possible.
1135 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1136 SmallVector<unsigned, 2> &Ops,
1138 // Filter the list of operand indexes that are to be folded. Abort if
1139 // any operand will prevent folding.
1140 unsigned MRInfo = 0;
1141 SmallVector<unsigned, 2> FoldOps;
1142 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1145 // It's only legal to remat for a use, not a def.
1146 if (ReMat && (MRInfo & VirtRegMap::isMod))
1149 return tii_->canFoldMemoryOperand(MI, FoldOps);
1152 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1153 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1154 for (LiveInterval::Ranges::const_iterator
1155 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1156 std::vector<IdxMBBPair>::const_iterator II =
1157 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1158 if (II == Idx2MBBMap.end())
1160 if (I->end > II->first) // crossing a MBB.
1162 MBBs.insert(II->second);
1163 if (MBBs.size() > 1)
1169 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1170 /// interval on to-be re-materialized operands of MI) with new register.
1171 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1172 MachineInstr *MI, unsigned NewVReg,
1174 // There is an implicit use. That means one of the other operand is
1175 // being remat'ed and the remat'ed instruction has li.reg as an
1176 // use operand. Make sure we rewrite that as well.
1177 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1178 MachineOperand &MO = MI->getOperand(i);
1181 unsigned Reg = MO.getReg();
1182 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1184 if (!vrm.isReMaterialized(Reg))
1186 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1187 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1189 UseMO->setReg(NewVReg);
1193 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1194 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1195 bool LiveIntervals::
1196 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1197 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1198 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1199 unsigned Slot, int LdSlot,
1200 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1202 const TargetRegisterClass* rc,
1203 SmallVector<int, 4> &ReMatIds,
1204 const MachineLoopInfo *loopInfo,
1205 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1206 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1207 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1208 MachineBasicBlock *MBB = MI->getParent();
1209 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1210 bool CanFold = false;
1212 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1213 MachineOperand& mop = MI->getOperand(i);
1216 unsigned Reg = mop.getReg();
1217 unsigned RegI = Reg;
1218 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1223 bool TryFold = !DefIsReMat;
1224 bool FoldSS = true; // Default behavior unless it's a remat.
1225 int FoldSlot = Slot;
1227 // If this is the rematerializable definition MI itself and
1228 // all of its uses are rematerialized, simply delete it.
1229 if (MI == ReMatOrigDefMI && CanDelete) {
1230 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1232 RemoveMachineInstrFromMaps(MI);
1233 vrm.RemoveMachineInstrFromMaps(MI);
1234 MI->eraseFromParent();
1238 // If def for this use can't be rematerialized, then try folding.
1239 // If def is rematerializable and it's a load, also try folding.
1240 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1242 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1248 // Scan all of the operands of this instruction rewriting operands
1249 // to use NewVReg instead of li.reg as appropriate. We do this for
1252 // 1. If the instr reads the same spilled vreg multiple times, we
1253 // want to reuse the NewVReg.
1254 // 2. If the instr is a two-addr instruction, we are required to
1255 // keep the src/dst regs pinned.
1257 // Keep track of whether we replace a use and/or def so that we can
1258 // create the spill interval with the appropriate range.
1260 HasUse = mop.isUse();
1261 HasDef = mop.isDef();
1262 SmallVector<unsigned, 2> Ops;
1264 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1265 const MachineOperand &MOj = MI->getOperand(j);
1268 unsigned RegJ = MOj.getReg();
1269 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1273 HasUse |= MOj.isUse();
1274 HasDef |= MOj.isDef();
1278 if (HasUse && !li.liveAt(getUseIndex(index)))
1279 // Must be defined by an implicit def. It should not be spilled. Note,
1280 // this is for correctness reason. e.g.
1281 // 8 %reg1024<def> = IMPLICIT_DEF
1282 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1283 // The live range [12, 14) are not part of the r1024 live interval since
1284 // it's defined by an implicit def. It will not conflicts with live
1285 // interval of r1025. Now suppose both registers are spilled, you can
1286 // easily see a situation where both registers are reloaded before
1287 // the INSERT_SUBREG and both target registers that would overlap.
1290 // Update stack slot spill weight if we are splitting.
1291 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1295 // Create a new virtual register for the spill interval.
1296 // Create the new register now so we can map the fold instruction
1297 // to the new register so when it is unfolded we get the correct
1299 bool CreatedNewVReg = false;
1301 NewVReg = mri_->createVirtualRegister(rc);
1303 CreatedNewVReg = true;
1309 // Do not fold load / store here if we are splitting. We'll find an
1310 // optimal point to insert a load / store later.
1312 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1313 Ops, FoldSS, FoldSlot, NewVReg)) {
1314 // Folding the load/store can completely change the instruction in
1315 // unpredictable ways, rescan it from the beginning.
1318 // We need to give the new vreg the same stack slot as the
1319 // spilled interval.
1320 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1326 if (isRemoved(MI)) {
1330 goto RestartInstruction;
1333 // We'll try to fold it later if it's profitable.
1334 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1338 mop.setReg(NewVReg);
1339 if (mop.isImplicit())
1340 rewriteImplicitOps(li, MI, NewVReg, vrm);
1342 // Reuse NewVReg for other reads.
1343 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1344 MachineOperand &mopj = MI->getOperand(Ops[j]);
1345 mopj.setReg(NewVReg);
1346 if (mopj.isImplicit())
1347 rewriteImplicitOps(li, MI, NewVReg, vrm);
1350 if (CreatedNewVReg) {
1352 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1353 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1354 // Each valnum may have its own remat id.
1355 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1357 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1359 if (!CanDelete || (HasUse && HasDef)) {
1360 // If this is a two-addr instruction then its use operands are
1361 // rematerializable but its def is not. It should be assigned a
1363 vrm.assignVirt2StackSlot(NewVReg, Slot);
1366 vrm.assignVirt2StackSlot(NewVReg, Slot);
1368 } else if (HasUse && HasDef &&
1369 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1370 // If this interval hasn't been assigned a stack slot (because earlier
1371 // def is a deleted remat def), do it now.
1372 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1373 vrm.assignVirt2StackSlot(NewVReg, Slot);
1376 // Re-matting an instruction with virtual register use. Add the
1377 // register as an implicit use on the use MI.
1378 if (DefIsReMat && ImpUse)
1379 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1381 // create a new register interval for this spill / remat.
1382 LiveInterval &nI = getOrCreateInterval(NewVReg);
1383 if (CreatedNewVReg) {
1384 NewLIs.push_back(&nI);
1385 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1387 vrm.setIsSplitFromReg(NewVReg, li.reg);
1391 if (CreatedNewVReg) {
1392 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1393 nI.getNextValue(~0U, 0, VNInfoAllocator));
1397 // Extend the split live interval to this def / use.
1398 unsigned End = getUseIndex(index)+1;
1399 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1400 nI.getValNumInfo(nI.getNumValNums()-1));
1406 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1407 nI.getNextValue(~0U, 0, VNInfoAllocator));
1412 DOUT << "\t\t\t\tAdded new interval: ";
1413 nI.print(DOUT, tri_);
1418 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1420 MachineBasicBlock *MBB, unsigned Idx) const {
1421 unsigned End = getMBBEndIdx(MBB);
1422 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1423 unsigned KillIdx = VNI->kills[j];
1424 if (KillIdx > Idx && KillIdx < End)
1430 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1431 /// during spilling.
1433 struct RewriteInfo {
1438 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1439 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1442 struct RewriteInfoCompare {
1443 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1444 return LHS.Index < RHS.Index;
1449 void LiveIntervals::
1450 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1451 LiveInterval::Ranges::const_iterator &I,
1452 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1453 unsigned Slot, int LdSlot,
1454 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1456 const TargetRegisterClass* rc,
1457 SmallVector<int, 4> &ReMatIds,
1458 const MachineLoopInfo *loopInfo,
1459 BitVector &SpillMBBs,
1460 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1461 BitVector &RestoreMBBs,
1462 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1463 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1464 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1465 bool AllCanFold = true;
1466 unsigned NewVReg = 0;
1467 unsigned start = getBaseIndex(I->start);
1468 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1470 // First collect all the def / use in this live range that will be rewritten.
1471 // Make sure they are sorted according to instruction index.
1472 std::vector<RewriteInfo> RewriteMIs;
1473 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1474 re = mri_->reg_end(); ri != re; ) {
1475 MachineInstr *MI = &*ri;
1476 MachineOperand &O = ri.getOperand();
1478 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1479 unsigned index = getInstructionIndex(MI);
1480 if (index < start || index >= end)
1482 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1483 // Must be defined by an implicit def. It should not be spilled. Note,
1484 // this is for correctness reason. e.g.
1485 // 8 %reg1024<def> = IMPLICIT_DEF
1486 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1487 // The live range [12, 14) are not part of the r1024 live interval since
1488 // it's defined by an implicit def. It will not conflicts with live
1489 // interval of r1025. Now suppose both registers are spilled, you can
1490 // easily see a situation where both registers are reloaded before
1491 // the INSERT_SUBREG and both target registers that would overlap.
1493 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1495 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1497 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1498 // Now rewrite the defs and uses.
1499 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1500 RewriteInfo &rwi = RewriteMIs[i];
1502 unsigned index = rwi.Index;
1503 bool MIHasUse = rwi.HasUse;
1504 bool MIHasDef = rwi.HasDef;
1505 MachineInstr *MI = rwi.MI;
1506 // If MI def and/or use the same register multiple times, then there
1507 // are multiple entries.
1508 unsigned NumUses = MIHasUse;
1509 while (i != e && RewriteMIs[i].MI == MI) {
1510 assert(RewriteMIs[i].Index == index);
1511 bool isUse = RewriteMIs[i].HasUse;
1512 if (isUse) ++NumUses;
1514 MIHasDef |= RewriteMIs[i].HasDef;
1517 MachineBasicBlock *MBB = MI->getParent();
1519 if (ImpUse && MI != ReMatDefMI) {
1520 // Re-matting an instruction with virtual register use. Update the
1521 // register interval's spill weight to HUGE_VALF to prevent it from
1523 LiveInterval &ImpLi = getInterval(ImpUse);
1524 ImpLi.weight = HUGE_VALF;
1527 unsigned MBBId = MBB->getNumber();
1528 unsigned ThisVReg = 0;
1530 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1531 if (NVI != MBBVRegsMap.end()) {
1532 ThisVReg = NVI->second;
1539 // It's better to start a new interval to avoid artifically
1540 // extend the new interval.
1541 if (MIHasDef && !MIHasUse) {
1542 MBBVRegsMap.erase(MBB->getNumber());
1548 bool IsNew = ThisVReg == 0;
1550 // This ends the previous live interval. If all of its def / use
1551 // can be folded, give it a low spill weight.
1552 if (NewVReg && TrySplit && AllCanFold) {
1553 LiveInterval &nI = getOrCreateInterval(NewVReg);
1560 bool HasDef = false;
1561 bool HasUse = false;
1562 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1563 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1564 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1565 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1566 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1567 if (!HasDef && !HasUse)
1570 AllCanFold &= CanFold;
1572 // Update weight of spill interval.
1573 LiveInterval &nI = getOrCreateInterval(NewVReg);
1575 // The spill weight is now infinity as it cannot be spilled again.
1576 nI.weight = HUGE_VALF;
1580 // Keep track of the last def and first use in each MBB.
1582 if (MI != ReMatOrigDefMI || !CanDelete) {
1583 bool HasKill = false;
1585 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1587 // If this is a two-address code, then this index starts a new VNInfo.
1588 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1590 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1592 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1593 SpillIdxes.find(MBBId);
1595 if (SII == SpillIdxes.end()) {
1596 std::vector<SRInfo> S;
1597 S.push_back(SRInfo(index, NewVReg, true));
1598 SpillIdxes.insert(std::make_pair(MBBId, S));
1599 } else if (SII->second.back().vreg != NewVReg) {
1600 SII->second.push_back(SRInfo(index, NewVReg, true));
1601 } else if ((int)index > SII->second.back().index) {
1602 // If there is an earlier def and this is a two-address
1603 // instruction, then it's not possible to fold the store (which
1604 // would also fold the load).
1605 SRInfo &Info = SII->second.back();
1607 Info.canFold = !HasUse;
1609 SpillMBBs.set(MBBId);
1610 } else if (SII != SpillIdxes.end() &&
1611 SII->second.back().vreg == NewVReg &&
1612 (int)index > SII->second.back().index) {
1613 // There is an earlier def that's not killed (must be two-address).
1614 // The spill is no longer needed.
1615 SII->second.pop_back();
1616 if (SII->second.empty()) {
1617 SpillIdxes.erase(MBBId);
1618 SpillMBBs.reset(MBBId);
1625 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1626 SpillIdxes.find(MBBId);
1627 if (SII != SpillIdxes.end() &&
1628 SII->second.back().vreg == NewVReg &&
1629 (int)index > SII->second.back().index)
1630 // Use(s) following the last def, it's not safe to fold the spill.
1631 SII->second.back().canFold = false;
1632 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1633 RestoreIdxes.find(MBBId);
1634 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1635 // If we are splitting live intervals, only fold if it's the first
1636 // use and there isn't another use later in the MBB.
1637 RII->second.back().canFold = false;
1639 // Only need a reload if there isn't an earlier def / use.
1640 if (RII == RestoreIdxes.end()) {
1641 std::vector<SRInfo> Infos;
1642 Infos.push_back(SRInfo(index, NewVReg, true));
1643 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1645 RII->second.push_back(SRInfo(index, NewVReg, true));
1647 RestoreMBBs.set(MBBId);
1651 // Update spill weight.
1652 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1653 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1656 if (NewVReg && TrySplit && AllCanFold) {
1657 // If all of its def / use can be folded, give it a low spill weight.
1658 LiveInterval &nI = getOrCreateInterval(NewVReg);
1663 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1664 BitVector &RestoreMBBs,
1665 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1666 if (!RestoreMBBs[Id])
1668 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1669 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1670 if (Restores[i].index == index &&
1671 Restores[i].vreg == vr &&
1672 Restores[i].canFold)
1677 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1678 BitVector &RestoreMBBs,
1679 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1680 if (!RestoreMBBs[Id])
1682 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1683 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1684 if (Restores[i].index == index && Restores[i].vreg)
1685 Restores[i].index = -1;
1688 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1689 /// spilled and create empty intervals for their uses.
1691 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1692 const TargetRegisterClass* rc,
1693 std::vector<LiveInterval*> &NewLIs) {
1694 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1695 re = mri_->reg_end(); ri != re; ) {
1696 MachineOperand &O = ri.getOperand();
1697 MachineInstr *MI = &*ri;
1700 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1701 "Register def was not rewritten?");
1702 RemoveMachineInstrFromMaps(MI);
1703 vrm.RemoveMachineInstrFromMaps(MI);
1704 MI->eraseFromParent();
1706 // This must be an use of an implicit_def so it's not part of the live
1707 // interval. Create a new empty live interval for it.
1708 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1709 unsigned NewVReg = mri_->createVirtualRegister(rc);
1711 vrm.setIsImplicitlyDefined(NewVReg);
1712 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1713 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1714 MachineOperand &MO = MI->getOperand(i);
1715 if (MO.isReg() && MO.getReg() == li.reg)
1724 bool operator()(LiveInterval* A, LiveInterval* B) {
1725 return A->beginNumber() < B->beginNumber();
1730 std::vector<LiveInterval*> LiveIntervals::
1731 addIntervalsForSpillsFast(const LiveInterval &li,
1732 const MachineLoopInfo *loopInfo,
1733 VirtRegMap &vrm, float& SSWeight) {
1734 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1736 std::vector<LiveInterval*> added;
1738 assert(li.weight != HUGE_VALF &&
1739 "attempt to spill already spilled interval!");
1741 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1745 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1749 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1750 while (RI != mri_->reg_end()) {
1751 MachineInstr* MI = &*RI;
1753 SmallVector<unsigned, 2> Indices;
1754 bool HasUse = false;
1755 bool HasDef = false;
1757 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1758 MachineOperand& mop = MI->getOperand(i);
1759 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1761 HasUse |= MI->getOperand(i).isUse();
1762 HasDef |= MI->getOperand(i).isDef();
1764 Indices.push_back(i);
1767 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1768 Indices, true, slot, li.reg)) {
1769 unsigned NewVReg = mri_->createVirtualRegister(rc);
1771 vrm.assignVirt2StackSlot(NewVReg, slot);
1773 // create a new register for this spill
1774 LiveInterval &nI = getOrCreateInterval(NewVReg);
1776 // the spill weight is now infinity as it
1777 // cannot be spilled again
1778 nI.weight = HUGE_VALF;
1780 // Rewrite register operands to use the new vreg.
1781 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1782 E = Indices.end(); I != E; ++I) {
1783 MI->getOperand(*I).setReg(NewVReg);
1785 if (MI->getOperand(*I).isUse())
1786 MI->getOperand(*I).setIsKill(true);
1789 // Fill in the new live interval.
1790 unsigned index = getInstructionIndex(MI);
1792 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1793 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1796 vrm.addRestorePoint(NewVReg, MI);
1799 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1800 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1803 vrm.addSpillPoint(NewVReg, true, MI);
1806 added.push_back(&nI);
1808 DOUT << "\t\t\t\tadded new interval: ";
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse) {
        if (HasDef)
          SSWeight += getSpillWeight(true, true, loopDepth);
        else
          SSWeight += getSpillWeight(false, true, loopDepth);
      } else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }
  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
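// addIntervalsForSpills is the standard (non-fast) spilling path. In
// outline:
//   1. If li is itself the product of an earlier split, rewrite its
//      single-val# ranges against the inherited stack slot and return.
//   2. Decide which val#s are rematerializable and whether a stack slot
//      is needed at all.
//   3. Rewrite all defs and uses, recording per-MBB spill/restore points.
//   4. Try to fold the recorded spills and restores into neighboring
//      instructions; otherwise hand them to the spiller.
//   5. Finalize the new intervals: kill flags, weights, dead intervals.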
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';
  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // The value is killed by a phi node, so not all of its uses can be
        // rematerialized. It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
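  // At this point ReMatDefs/ReMatOrigDefs hold, for each rematerializable
  // val#, a clone of its defining instruction that can be re-executed at a
  // use point instead of reloading from a stack slot, and ReMatDelete marks
  // the defs that become dead once every use is rematerialized.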
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs, SSWeight);
  }
  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
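  // From here on we are splitting: SpillIdxes/RestoreIdxes recorded where
  // each new vreg must be stored or reloaded, and we now try to fold those
  // accesses into nearby instructions before asking the spiller to emit
  // explicit ones.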
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
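// A typical caller (a sketch modeled on the linear scan allocator; the
// local names are illustrative, not from this file) re-queues the returned
// intervals for allocation:
//   float SSWeight;
//   std::vector<LiveInterval*> added =
//     lis->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm, SSWeight);
//   for (unsigned i = 0, e = added.size(); i != e; ++i)
//     unhandled.push(added[i]);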
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
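// For example, on x86 hasAllocatableSuperReg(AL) would inspect AX, EAX
// (and RAX in 64-bit mode) and return true if any of them is allocatable
// and currently has a live interval.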
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
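// E.g. on 32-bit x86, getRepresentativeReg(AX) walks the super-register
// chain AX -> EAX and, provided EAX has an interval, returns EAX: the
// widest register whose liveness covers all aliases of AX.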
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
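// This count lets a caller compare candidate physical registers by how
// many of li's defs and uses would need to be worked around, and prefer
// the candidate with the fewest conflicts.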
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
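// Removing [getLoadIndex(Index), getStoreIndex(Index)+1) punches the
// physreg's liveness out across the instruction's whole slot range, so it
// no longer conflicts here; the emergency-spill entry recorded in the
// VirtRegMap tells the spiller to save and restore SpillReg around the
// instruction instead.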
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}
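// Illustrative use (the caller and names are hypothetical): a pass that
// inserts a copy and needs its result live out to the end of the block
// would call
//   LiveRange LR = lis->addLiveRangeToEndOfBlock(DstReg, CopyMI);
// which defines DstReg at CopyMI's DEF slot and extends it through the
// end of CopyMI's parent block, marking a phi kill at the boundary.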