1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// DisableReMat - turn off rematerialization entirely (default: off, i.e.
// remat is enabled).
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
// SplitAtBB - split live intervals at basic-block boundaries (default: on).
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
// SplitLimit - cap on the number of interval splits performed; the default
// of -1 presumably means "no limit" — confirm at the use site.
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
// EnableAggressiveRemat - consulted in isReMaterializable(); when set, allows
// remat of instructions the target does not mark trivially rematerializable.
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// EnableFastSpilling - presumably selects a faster spilling path elsewhere in
// this pass — verify against the spiller code.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Statistics reported under -stats.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the pass registry.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires (AliasAnalysis,
/// LiveVariables, PHI elimination, two-address lowering) and the ones it
/// preserves, so the pass manager schedules and caches them correctly.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
72 AU.addPreservedID(PHIEliminationID);
73 AU.addRequiredID(PHIEliminationID);
76 AU.addRequiredID(TwoAddressInstructionPassID);
77 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Free all per-function state: the LiveInterval objects in
/// r2iMap_, the VNInfo allocator's memory, and any cloned MachineInstrs.
80 void LiveIntervals::releaseMemory() {
81 // Free the live intervals themselves.
82 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
83 E = r2iMap_.end(); I != E; ++I)
91 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
92 VNInfoAllocator.Reset();
93 while (!ClonedMIs.empty()) {
94 MachineInstr *MI = ClonedMIs.back();
96 mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign a fresh index (InstrSlots::NUM slots wide) to
/// every MachineInstr and basic block, then remap the start/end points of all
/// existing live ranges, VNInfo defs, and kill indices from the old numbering
/// to the new one.
100 void LiveIntervals::computeNumbering() {
// Save the old maps so ranges computed against the previous numbering can be
// translated after renumbering.
101 Index2MiMap OldI2MI = i2miMap_;
102 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
111 // Number MachineInstrs and MachineBasicBlocks.
112 // Initialize MBB indexes to a sentinel.
113 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
115 unsigned MIIndex = 0;
116 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
118 unsigned StartIdx = MIIndex;
120 // Insert an empty slot at the beginning of each block.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
124 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
126 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
127 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was a previous numbering, walk every live range of every interval
// and translate it into the new index space.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
// A LOAD offset marks a block-boundary index: find the block that owned the
// old index and use its new start index.
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
// The instruction that ended the range may have been erased; scan forward to
// the next surviving instruction.
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Entry point for the pass: cache pointers to the
/// register info, target hooks, and required analyses, compute the
/// allocatable-register set, and build the live intervals for `fn`. The
/// numIntervals statistic is bumped by the number of intervals created.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
261 numIntervals += getNumIntervals();
267 /// print - Implement the dump method: write all live intervals followed by
/// every machine instruction (with its index) to the given stream.
268 void LiveIntervals::print(std::ostream &O, const Module* ) const {
269 O << "********** INTERVALS **********\n";
270 for (const_iterator I = begin(), E = end(); I != E; ++I) {
271 I->second->print(O, tri_);
275 O << "********** MACHINEINSTRS **********\n";
276 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
277 mbbi != mbbe; ++mbbi) {
278 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
279 for (MachineBasicBlock::iterator mii = mbbi->begin(),
280 mie = mbbi->end(); mii != mie; ++mii) {
281 O << getInstructionIndex(mii) << '\t' << *mii;
286 /// conflictsWithPhysRegDef - Returns true if the specified register
287 /// is defined during the duration of the specified interval.
/// Walks every instruction index covered by li's ranges; coalesced copies of
/// li.reg itself are not treated as conflicts, and virtual-register operands
/// are mapped through the VirtRegMap before the overlap check.
288 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
289 VirtRegMap &vrm, unsigned reg) {
290 for (LiveInterval::Ranges::const_iterator
291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
292 for (unsigned index = getBaseIndex(I->start),
293 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
294 index += InstrSlots::NUM) {
295 // skip deleted instructions
296 while (index != end && !getInstructionFromIndex(index))
297 index += InstrSlots::NUM;
298 if (index == end) break;
300 MachineInstr *MI = getInstructionFromIndex(index);
301 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
302 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
303 if (SrcReg == li.reg || DstReg == li.reg)
305 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
306 MachineOperand& mop = MI->getOperand(i);
309 unsigned PhysReg = mop.getReg();
310 if (PhysReg == 0 || PhysReg == li.reg)
312 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
313 if (!vrm.hasPhys(PhysReg))
315 PhysReg = vrm.getPhys(PhysReg);
317 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
326 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
327 /// it can check use as well.
/// Instructions in JoinedCopies are skipped; only direct physical-register
/// operands are examined (virtual registers are ignored here), and a
/// sub-register reference of Reg counts as a conflict.
328 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
329 unsigned Reg, bool CheckUse,
330 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
331 for (LiveInterval::Ranges::const_iterator
332 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
333 for (unsigned index = getBaseIndex(I->start),
334 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
335 index += InstrSlots::NUM) {
336 // Skip deleted instructions.
337 MachineInstr *MI = 0;
338 while (index != end) {
339 MI = getInstructionFromIndex(index);
342 index += InstrSlots::NUM;
344 if (index == end) break;
346 if (JoinedCopies.count(MI))
348 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
349 MachineOperand& MO = MI->getOperand(i);
352 if (MO.isUse() && !CheckUse)
354 unsigned PhysReg = MO.getReg();
355 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
357 if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Print a register to cerr: physical registers by their
/// target name, virtual registers as "%reg<N>".
367 void LiveIntervals::printRegName(unsigned reg) const {
368 if (TargetRegisterInfo::isPhysicalRegister(reg))
369 cerr << tri_->getName(reg);
371 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
/// register by instruction `mi` (operand MO at index MIIdx). The first def of
/// a vreg creates the interval's value number 0 and its initial ranges from
/// LiveVariables kill information; later defs arise only from two-address
/// rewriting or PHI elimination and are handled by splitting/renumbering the
/// existing value numbers.
374 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
375 MachineBasicBlock::iterator mi,
376 unsigned MIIdx, MachineOperand& MO,
378 LiveInterval &interval) {
379 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
380 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
382 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
383 DOUT << "is a implicit_def\n";
387 // Virtual registers may be defined multiple times (due to phi
388 // elimination and 2-addr elimination). Much of what we do only has to be
389 // done once for the vreg. We use an empty interval to detect the first
390 // time we see a vreg.
391 if (interval.empty()) {
392 // Get the Idx of the defining instructions.
393 unsigned defIndex = getDefIndex(MIIdx);
394 // Earlyclobbers move back one.
395 if (MO.isEarlyClobber())
396 defIndex = getUseIndex(MIIdx);
398 MachineInstr *CopyMI = NULL;
399 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
400 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
401 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
402 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
404 // Earlyclobbers move back one.
405 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
407 assert(ValNo->id == 0 && "First value in interval is not 0?");
409 // Loop over all of the blocks that the vreg is defined in. There are
410 // two cases we have to handle here. The most common case is a vreg
411 // whose lifetime is contained within a basic block. In this case there
412 // will be a single kill, in MBB, which comes after the definition.
413 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
414 // FIXME: what about dead vars?
416 if (vi.Kills[0] != mi)
417 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
419 killIdx = defIndex+1;
421 // If the kill happens after the definition, we have an intra-block
423 if (killIdx > defIndex) {
424 assert(vi.AliveBlocks.none() &&
425 "Shouldn't be alive across any blocks!");
426 LiveRange LR(defIndex, killIdx, ValNo);
427 interval.addRange(LR);
428 DOUT << " +" << LR << "\n";
429 interval.addKill(ValNo, killIdx);
434 // The other case we handle is when a virtual register lives to the end
435 // of the defining block, potentially live across some blocks, then is
436 // live into some number of blocks, but gets killed. Start by adding a
437 // range that goes from this definition to the end of the defining block.
438 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
439 DOUT << " +" << NewLR;
440 interval.addRange(NewLR);
442 // Iterate over all of the blocks that the variable is completely
443 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
445 for (int i = vi.AliveBlocks.find_first(); i != -1;
446 i = vi.AliveBlocks.find_next(i)) {
447 LiveRange LR(getMBBStartIdx(i),
448 getMBBEndIdx(i)+1, // MBB ends at -1.
450 interval.addRange(LR);
454 // Finally, this virtual register is live from the start of any killing
455 // block to the 'use' slot of the killing instruction.
456 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
457 MachineInstr *Kill = vi.Kills[i];
458 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
459 LiveRange LR(getMBBStartIdx(Kill->getParent()),
461 interval.addRange(LR);
462 interval.addKill(ValNo, killIdx);
467 // If this is the second time we see a virtual register definition, it
468 // must be due to phi elimination or two addr elimination. If this is
469 // the result of two address elimination, then the vreg is one of the
470 // def-and-use register operand.
471 if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
472 // If this is a two-address definition, then we have already processed
473 // the live range. The only problem is that we didn't realize there
474 // are actually two values in the live interval. Because of this we
475 // need to take the LiveRegion that defines this register and split it
477 assert(interval.containsOneValue());
478 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
479 unsigned RedefIndex = getDefIndex(MIIdx);
480 // It cannot be an early clobber MO.
481 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
483 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
484 VNInfo *OldValNo = OldLR->valno;
486 // Delete the initial value, which should be short and continuous,
487 // because the 2-addr copy must be in the same MBB as the redef.
488 interval.removeRange(DefIndex, RedefIndex);
490 // Two-address vregs should always only be redefined once. This means
491 // that at this point, there should be exactly one value number in it.
492 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
494 // The new value number (#1) is defined by the instruction we claimed
496 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
499 // Value#0 is now defined by the 2-addr instruction.
500 OldValNo->def = RedefIndex;
503 // Add the new live interval which replaces the range for the input copy.
504 LiveRange LR(DefIndex, RedefIndex, ValNo);
505 DOUT << " replace range with " << LR;
506 interval.addRange(LR);
507 interval.addKill(ValNo, RedefIndex);
509 // If this redefinition is dead, we need to add a dummy unit live
510 // range covering the def slot.
512 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
515 interval.print(DOUT, tri_);
518 // Otherwise, this must be because of phi elimination. If this is the
519 // first redefinition of the vreg that we have seen, go back and change
520 // the live range in the PHI block to be a different value number.
521 if (interval.containsOneValue()) {
522 assert(vi.Kills.size() == 1 &&
523 "PHI elimination vreg should have one kill, the PHI itself!");
525 // Remove the old range that we now know has an incorrect number.
526 VNInfo *VNI = interval.getValNumInfo(0);
527 MachineInstr *Killer = vi.Kills[0];
528 unsigned Start = getMBBStartIdx(Killer->getParent());
529 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
530 DOUT << " Removing [" << Start << "," << End << "] from: ";
531 interval.print(DOUT, tri_); DOUT << "\n";
532 interval.removeRange(Start, End);
533 VNI->hasPHIKill = true;
534 DOUT << " RESULT: "; interval.print(DOUT, tri_);
536 // Replace the interval with one of a NEW value number. Note that this
537 // value number isn't actually defined by an instruction, weird huh? :)
538 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
539 DOUT << " replace range with " << LR;
540 interval.addRange(LR);
541 interval.addKill(LR.valno, End);
542 DOUT << " RESULT: "; interval.print(DOUT, tri_);
545 // In the case of PHI elimination, each variable definition is only
546 // live until the end of the block. We've already taken care of the
547 // rest of the live range.
548 unsigned defIndex = getDefIndex(MIIdx);
549 // It cannot be an early clobber MO.
550 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
553 MachineInstr *CopyMI = NULL;
554 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
555 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
556 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
557 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
559 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
561 unsigned killIndex = getMBBEndIdx(mbb) + 1;
562 LiveRange LR(defIndex, killIndex, ValNo);
563 interval.addRange(LR);
564 interval.addKill(ValNo, killIndex);
565 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Add a live range for a physical-register def
/// by `mi`. The range starts at the def slot (or use slot for earlyclobbers)
/// and ends at the first kill or redefinition within the same block; if the
/// def's value was a copy, CopyMI records the copy instruction.
573 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
574 MachineBasicBlock::iterator mi,
577 LiveInterval &interval,
578 MachineInstr *CopyMI) {
579 // A physical register cannot be live across basic block, so its
580 // lifetime must end somewhere in its defining basic block.
581 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
583 unsigned baseIndex = MIIdx;
584 unsigned start = getDefIndex(baseIndex);
585 // Earlyclobbers move back one.
586 if (MO.isEarlyClobber())
587 start = getUseIndex(MIIdx);
588 unsigned end = start;
590 // If it is not used after definition, it is considered dead at
591 // the instruction defining it. Hence its interval is:
592 // [defSlot(def), defSlot(def)+1)
599 // If it is not dead on definition, it must be killed by a
600 // subsequent instruction. Hence its interval is:
601 // [defSlot(def), useSlot(kill)+1)
602 baseIndex += InstrSlots::NUM;
603 while (++mi != MBB->end()) {
// Skip index slots that belong to deleted instructions.
604 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
605 getInstructionFromIndex(baseIndex) == 0)
606 baseIndex += InstrSlots::NUM;
607 if (mi->killsRegister(interval.reg, tri_)) {
609 end = getUseIndex(baseIndex) + 1;
611 } else if (mi->modifiesRegister(interval.reg, tri_)) {
612 // Another instruction redefines the register before it is ever read.
613 // Then the register is essentially dead at the instruction that defines
614 // it. Hence its interval is:
615 // [defSlot(def), defSlot(def)+1)
621 baseIndex += InstrSlots::NUM;
624 // The only case we should have a dead physreg here without a killing or
625 // instruction where we know it's dead is if it is live-in to the function
627 assert(!CopyMI && "physreg was not killed in defining block!");
631 assert(start < end && "did not find end of interval?");
633 // Already exists? Extend old live interval.
634 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
635 bool Extend = OldLR != interval.end();
636 VNInfo *ValNo = Extend
637 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
638 if (MO.isEarlyClobber() && Extend)
639 ValNo->redefByEC = true;
640 LiveRange LR(start, end, ValNo);
641 interval.addRange(LR);
642 interval.addKill(LR.valno, end);
643 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler. For allocatable physical registers, also
/// processes each sub-register (a def of a register defines its subregs).
646 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
647 MachineBasicBlock::iterator MI,
651 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
652 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
653 getOrCreateInterval(MO.getReg()));
654 else if (allocatableRegs_[MO.getReg()]) {
655 MachineInstr *CopyMI = NULL;
656 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
657 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
658 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
659 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
661 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
662 getOrCreateInterval(MO.getReg()), CopyMI);
663 // Def of a register also defines its sub-registers.
664 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
665 // If MI also modifies the sub-register explicitly, avoid processing it
666 // more than once. Do not pass in TRI here so it checks for exact match.
667 if (!MI->modifiesRegister(*AS))
668 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
669 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB, starting at MIIdx and ending at its first kill or redefinition in
/// the block (or spanning the block if neither is found). The value number is
/// created with the ~0U sentinel def since there is no defining instruction.
673 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
675 LiveInterval &interval, bool isAlias) {
676 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
678 // Look for kills, if it reaches a def before it's killed, then it shouldn't
679 // be considered a livein.
680 MachineBasicBlock::iterator mi = MBB->begin();
681 unsigned baseIndex = MIIdx;
682 unsigned start = baseIndex;
683 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
684 getInstructionFromIndex(baseIndex) == 0)
685 baseIndex += InstrSlots::NUM;
686 unsigned end = baseIndex;
688 while (mi != MBB->end()) {
689 if (mi->killsRegister(interval.reg, tri_)) {
691 end = getUseIndex(baseIndex) + 1;
693 } else if (mi->modifiesRegister(interval.reg, tri_)) {
694 // Another instruction redefines the register before it is ever read.
695 // Then the register is essentially dead at the instruction that defines
696 // it. Hence its interval is:
697 // [defSlot(def), defSlot(def)+1)
699 end = getDefIndex(start) + 1;
// Advance past any index slots with no surviving instruction.
703 baseIndex += InstrSlots::NUM;
704 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
705 getInstructionFromIndex(baseIndex) == 0)
706 baseIndex += InstrSlots::NUM;
711 // Live-in register might not be used at all.
715 end = getDefIndex(MIIdx) + 1;
717 DOUT << " live through";
722 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
723 interval.addRange(LR);
724 interval.addKill(LR.valno, end);
725 DOUT << " +" << LR << '\n';
728 /// computeIntervals - Computes the live intervals for virtual
729 /// registers. For some ordering of the machine instructions [1,N], a
730 /// live interval is an interval [i, j) where 1 <= i <= j < N for
731 /// which a variable is live.
732 void LiveIntervals::computeIntervals() {
734 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
735 << "********** Function: "
736 << ((Value*)mf_->getFunction())->getName() << '\n';
738 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
740 MachineBasicBlock *MBB = MBBI;
741 // Track the index of the current machine instr.
742 unsigned MIIndex = getMBBStartIdx(MBB);
743 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
745 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
747 // Create intervals for live-ins to this BB first.
748 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
749 LE = MBB->livein_end(); LI != LE; ++LI) {
750 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
751 // Multiple live-ins can alias the same register.
752 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
753 if (!hasInterval(*AS))
754 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
758 // Skip over empty initial indices.
759 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
760 getInstructionFromIndex(MIIndex) == 0)
761 MIIndex += InstrSlots::NUM;
763 for (; MI != miEnd; ++MI) {
764 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse so defs added by the handlers don't disturb the
// indices still to be visited.
767 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
768 MachineOperand &MO = MI->getOperand(i);
769 // handle register defs - build intervals
770 if (MO.isReg() && MO.getReg() && MO.isDef()) {
771 handleRegisterDef(MBB, MI, MIIndex, MO, i);
775 // Skip over the empty slots after each instruction.
776 unsigned Slots = MI->getDesc().getNumDefs();
779 MIIndex += InstrSlots::NUM * Slots;
781 // Skip over empty indices.
782 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
783 getInstructionFromIndex(MIIndex) == 0)
784 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs the basic blocks whose start index
/// falls inside [Start, End), using binary search over the sorted Idx2MBBMap.
789 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
790 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
791 std::vector<IdxMBBPair>::const_iterator I =
792 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
795 while (I != Idx2MBBMap.end()) {
798 MBBs.push_back(I->second);
/// findReachableMBBs - Like findLiveInMBBs, but additionally walks the
/// successor lists of the blocks found in [Start, End).
805 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
806 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
807 std::vector<IdxMBBPair>::const_iterator I =
808 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
811 while (I != Idx2MBBMap.end()) {
814 MachineBasicBlock *MBB = I->second;
815 if (getMBBEndIdx(MBB) > End)
817 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
818 SE = MBB->succ_end(); SI != SE; ++SI)
/// createInterval - Allocate a new LiveInterval for `reg`. Physical registers
/// get a HUGE_VALF spill weight (presumably so the allocator never chooses to
/// spill them — confirm at the weight's use site); virtuals start at 0.
826 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
827 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
828 return new LiveInterval(reg, Weight);
831 /// dupInterval - Duplicate a live interval. The caller is responsible for
832 /// managing the allocated memory.
833 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
834 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers, allocating VNInfos from this pass's
// allocator so they share its lifetime.
835 NewLI->Copy(*li, getVNInfoAllocator());
839 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
840 /// copy field and returns the source register that defines it.
/// Handles EXTRACT_SUBREG (operand 1, narrowed to the subreg for physregs),
/// INSERT_SUBREG (operand 2), and plain target move instructions.
841 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
845 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
846 // If it's extracting out of a physical register, return the sub-register.
847 unsigned Reg = VNI->copy->getOperand(1).getReg();
848 if (TargetRegisterInfo::isPhysicalRegister(Reg))
849 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
851 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
852 return VNI->copy->getOperand(2).getReg();
854 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
855 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
857 assert(0 && "Unrecognized copy instruction!");
861 //===----------------------------------------------------------------------===//
862 // Register allocator hooks.
865 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
866 /// allow one) virtual register operand, then its uses are implicitly using
867 /// the register. Returns the virtual register.
868 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
869 MachineInstr *MI) const {
871 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
872 MachineOperand &MO = MI->getOperand(i);
// Only register uses count; the interval's own register is not an
// "implicit use" of something else.
873 if (!MO.isReg() || !MO.isUse())
875 unsigned Reg = MO.getReg();
876 if (Reg == 0 || Reg == li.reg)
878 // FIXME: For now, only remat MI with at most one register operand.
880 "Can't rematerialize instruction with multiple register operand!");
889 /// isValNoAvailableAt - Return true if the val# of the specified interval
890 /// which reaches the given instruction also reaches the specified use index.
891 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
892 unsigned UseIdx) const {
893 unsigned Index = getInstructionIndex(MI);
// The value number live at MI's index...
894 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ...must also be the one live at UseIdx (and UseIdx must be covered at all).
895 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
896 return UI != li.end() && UI->valno == ValNo;
899 /// isReMaterializable - Returns true if the definition MI of the specified
900 /// val# of the specified interval is re-materializable.
/// IMPLICIT_DEFs and loads from immutable stack slots are accepted outright;
/// otherwise the target's trivial-remat hook decides, unless -aggressive-remat
/// is set, in which case a conservative target-independent analysis of MI's
/// memory operands and register operands is applied. SpillIs vetoes remat
/// whose implicit-use register is about to be spilled.
901 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
902 const VNInfo *ValNo, MachineInstr *MI,
903 SmallVectorImpl<LiveInterval*> &SpillIs,
908 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
912 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
913 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
914 // FIXME: Let target specific isReallyTriviallyReMaterializable determine
915 // this but remember this is not safe to fold into a two-address
917 // This is a load from fixed stack slot. It can be rematerialized.
920 // If the target-specific rules don't identify an instruction as
921 // being trivially rematerializable, use some target-independent
923 if (!MI->getDesc().isRematerializable() ||
924 !tii_->isTriviallyReMaterializable(MI)) {
925 if (!EnableAggressiveRemat)
928 // If the instruction accesses memory but the memoperands have been lost,
929 // we can't analyze it.
930 const TargetInstrDesc &TID = MI->getDesc();
931 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
934 // Avoid instructions obviously unsafe for remat.
935 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
938 // If the instruction accesses memory and the memory could be non-constant,
939 // assume the instruction is not rematerializable.
940 for (std::list<MachineMemOperand>::const_iterator
941 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
942 const MachineMemOperand &MMO = *I;
943 if (MMO.isVolatile() || MMO.isStore())
945 const Value *V = MMO.getValue();
948 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
949 if (!PSV->isConstant(mf_->getFrameInfo()))
951 } else if (!aa_->pointsToConstantMemory(V))
955 // If any of the registers accessed are non-constant, conservatively assume
956 // the instruction is not rematerializable.
958 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
959 const MachineOperand &MO = MI->getOperand(i);
961 unsigned Reg = MO.getReg();
964 if (TargetRegisterInfo::isPhysicalRegister(Reg))
967 // Only allow one def, and that in the first operand.
968 if (MO.isDef() != (i == 0))
971 // Only allow constant-valued registers.
972 bool IsLiveIn = mri_->isLiveIn(Reg);
973 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
976 // For the def, it should be the only def of that register.
977 if (MO.isDef() && (next(I) != E || IsLiveIn))
981 // Only allow at most one other register use, as that's all the
982 // remat mechanisms support currently.
986 else if (Reg != ImpUse)
989 // For the use, there should be only one associated def.
990 if (I != E && (next(I) != E || IsLiveIn))
997 unsigned ImpUse = getReMatImplicitUse(li, MI);
// If the remat has an implicit virtual-register use, its value must be
// available at every use point of li that this val# reaches.
999 const LiveInterval &ImpLi = getInterval(ImpUse);
1000 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1001 re = mri_->use_end(); ri != re; ++ri) {
1002 MachineInstr *UseMI = &*ri;
1003 unsigned UseIdx = getInstructionIndex(UseMI);
1004 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1006 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1010 // If a register operand of the re-materialized instruction is going to
1011 // be spilled next, then it's not legal to re-materialize this instruction.
1012 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1013 if (ImpUse == SpillIs[i]->reg)
1019 /// isReMaterializable - Returns true if the definition MI of the specified
1020 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the SpillIs-taking overload with a
// throwaway SpillIs vector and a throwaway isLoad out-flag (Dummy2 is declared
// on an elided line, per the call below) since this caller needs neither.
1021 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1022 const VNInfo *ValNo, MachineInstr *MI) {
1023 SmallVector<LiveInterval*, 4> Dummy1;
1025 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1028 /// isReMaterializable - Returns true if every definition of MI of every
1029 /// val# of the specified interval is re-materializable.
// NOTE(review): listing is elided; intermediate lines (including the isLoad
// out-parameter declaration and the early-return on failure) are not visible.
1030 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1031 SmallVectorImpl<LiveInterval*> &SpillIs,
// Walk every value number of the interval; each live def must itself be
// rematerializable for the whole interval to qualify.
1034 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1036 const VNInfo *VNI = *i;
1037 unsigned DefIdx = VNI->def;
1039 continue; // Dead val#.
1040 // Is the def for the val# rematerializable?
1043 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
1044 bool DefIsLoad = false;
1046 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any rematerializable def is a load into the caller's
// isLoad flag (declared on an elided parameter line).
1048 isLoad |= DefIsLoad;
1053 /// FilterFoldedOps - Filter out two-address use operands. Return
1054 /// true if it finds any issue with the operands that ought to prevent
// folding. Accumulates mod/ref information for the kept operands in MRInfo
// (an out-parameter declared on an elided line) and appends the operand
// indexes that may be folded to FoldOps.
1056 static bool FilterFoldedOps(MachineInstr *MI,
1057 SmallVector<unsigned, 2> &Ops,
1059 SmallVector<unsigned, 2> &FoldOps) {
1060 const TargetInstrDesc &TID = MI->getDesc();
1063 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1064 unsigned OpIdx = Ops[i];
1065 MachineOperand &MO = MI->getOperand(OpIdx);
1066 // FIXME: fold subreg use.
// Defs mark the access as a modification.
1070 MRInfo |= (unsigned)VirtRegMap::isMod;
1072 // Filter out two-address use operand(s).
// A tied (TIED_TO) explicit use is both read and written; record isModRef
// and do not add it to FoldOps.
1073 if (!MO.isImplicit() &&
1074 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1075 MRInfo = VirtRegMap::isModRef;
// Plain uses mark the access as a reference.
1078 MRInfo |= (unsigned)VirtRegMap::isRef;
// Operand survived filtering; it is a candidate for memory folding.
1080 FoldOps.push_back(OpIdx);
1086 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1087 /// slot / to reg or any rematerialized load into ith operand of specified
1088 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true (per the doc above; the trailing doc line and some interior
// lines are elided in this listing).
1090 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1091 VirtRegMap &vrm, MachineInstr *DefMI,
1093 SmallVector<unsigned, 2> &Ops,
1094 bool isSS, int Slot, unsigned Reg) {
1095 // If it is an implicit def instruction, just delete it.
1096 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1097 RemoveMachineInstrFromMaps(MI);
1098 vrm.RemoveMachineInstrFromMaps(MI);
1099 MI->eraseFromParent();
1104 // Filter the list of operand indexes that are to be folded. Abort if
1105 // any operand will prevent folding.
1106 unsigned MRInfo = 0;
1107 SmallVector<unsigned, 2> FoldOps;
1108 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1111 // The only time it's safe to fold into a two address instruction is when
1112 // it's folding reload and spill from / into a spill stack slot.
1113 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (isSS) or the remat def
// instruction into MI; fmi is the folded replacement, or presumably null on
// failure (failure path elided).
1116 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1117 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1119 // Remember this instruction uses the spill slot.
1120 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1122 // Attempt to fold the memory reference into the instruction. If
1123 // we can do this, we don't need to insert spill code.
1124 MachineBasicBlock &MBB = *MI->getParent();
1125 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1126 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the folded one.
1127 vrm.transferSpillPts(MI, fmi);
1128 vrm.transferRestorePts(MI, fmi);
1129 vrm.transferEmergencySpills(MI, fmi);
// Re-point the index<->instruction maps at the folded instruction, then
// splice it into the block in place of the original (MI is updated by ref).
1131 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1132 mi2iMap_[fmi] = InstrIdx;
1133 MI = MBB.insert(MBB.erase(MI), fmi);
1140 /// canFoldMemoryOperand - Returns true if the specified load / store
1141 /// folding is possible.
// Dry-run counterpart of tryFoldMemoryOperand: same operand filtering, but
// only queries the target without rewriting anything.
1142 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1143 SmallVector<unsigned, 2> &Ops,
1145 // Filter the list of operand indexes that are to be folded. Abort if
1146 // any operand will prevent folding.
1147 unsigned MRInfo = 0;
1148 SmallVector<unsigned, 2> FoldOps;
1149 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1152 // It's only legal to remat for a use, not a def.
1153 if (ReMat && (MRInfo & VirtRegMap::isMod))
1156 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Returns whether every range of the interval lies
// within a single basic block (per the visible checks: no range may cross an
// MBB boundary and all ranges must map to the same MBB; return lines elided).
1159 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1160 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1161 for (LiveInterval::Ranges::const_iterator
1162 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Binary search the sorted index->MBB map for the block containing the
// range start.
1163 std::vector<IdxMBBPair>::const_iterator II =
1164 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1165 if (II == Idx2MBBMap.end())
1167 if (I->end > II->first) // crossing a MBB.
1169 MBBs.insert(II->second);
// Seen more than one distinct block: interval is not confined to one MBB.
1170 if (MBBs.size() > 1)
1176 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1177 /// interval on to-be re-materialized operands of MI) with new register.
1178 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1179 MachineInstr *MI, unsigned NewVReg,
1181 // There is an implicit use. That means one of the other operand is
1182 // being remat'ed and the remat'ed instruction has li.reg as an
1183 // use operand. Make sure we rewrite that as well.
1184 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1185 MachineOperand &MO = MI->getOperand(i);
1188 unsigned Reg = MO.getReg();
// Only virtual registers can have been rematerialized.
1189 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1191 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
1193 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1194 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1196 UseMO->setReg(NewVReg);
1200 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1201 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction MI that touches the spilled interval li: possibly
// deletes a fully-remat'ed def, tries to fold the reload/spill into MI,
// otherwise rewrites the operands to a (possibly new) vreg NewVReg and
// extends/creates that vreg's interval. Returns whether all operands could be
// folded (per CanFold below).
// NOTE(review): this listing is elided; several original lines (returns,
// else-branches, closing braces) are missing between the numbered lines, so
// the comments below describe only the visible code.
1202 bool LiveIntervals::
1203 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1204 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1205 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1206 unsigned Slot, int LdSlot,
1207 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1209 const TargetRegisterClass* rc,
1210 SmallVector<int, 4> &ReMatIds,
1211 const MachineLoopInfo *loopInfo,
1212 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1213 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1214 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1215 MachineBasicBlock *MBB = MI->getParent();
1216 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1217 bool CanFold = false;
// Scan operands looking for references to the spilled register li.reg.
1219 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1220 MachineOperand& mop = MI->getOperand(i);
1223 unsigned Reg = mop.getReg();
1224 unsigned RegI = Reg;
1225 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1230 bool TryFold = !DefIsReMat;
1231 bool FoldSS = true; // Default behavior unless it's a remat.
1232 int FoldSlot = Slot;
1234 // If this is the rematerializable definition MI itself and
1235 // all of its uses are rematerialized, simply delete it.
1236 if (MI == ReMatOrigDefMI && CanDelete) {
1237 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1239 RemoveMachineInstrFromMaps(MI);
1240 vrm.RemoveMachineInstrFromMaps(MI);
1241 MI->eraseFromParent();
1245 // If def for this use can't be rematerialized, then try folding.
1246 // If def is rematerializable and it's a load, also try folding.
1247 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1249 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1255 // Scan all of the operands of this instruction rewriting operands
1256 // to use NewVReg instead of li.reg as appropriate. We do this for
1259 // 1. If the instr reads the same spilled vreg multiple times, we
1260 // want to reuse the NewVReg.
1261 // 2. If the instr is a two-addr instruction, we are required to
1262 // keep the src/dst regs pinned.
1264 // Keep track of whether we replace a use and/or def so that we can
1265 // create the spill interval with the appropriate range.
1267 HasUse = mop.isUse();
1268 HasDef = mop.isDef();
1269 SmallVector<unsigned, 2> Ops;
// Collect all other operands of MI that also reference li.reg so they are
// rewritten together with this one.
1271 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1272 const MachineOperand &MOj = MI->getOperand(j);
1275 unsigned RegJ = MOj.getReg();
1276 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1280 HasUse |= MOj.isUse();
1281 HasDef |= MOj.isDef();
1285 if (HasUse && !li.liveAt(getUseIndex(index)))
1286 // Must be defined by an implicit def. It should not be spilled. Note,
1287 // this is for correctness reason. e.g.
1288 // 8 %reg1024<def> = IMPLICIT_DEF
1289 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1290 // The live range [12, 14) are not part of the r1024 live interval since
1291 // it's defined by an implicit def. It will not conflicts with live
1292 // interval of r1025. Now suppose both registers are spilled, you can
1293 // easily see a situation where both registers are reloaded before
1294 // the INSERT_SUBREG and both target registers that would overlap.
1297 // Update stack slot spill weight if we are splitting.
1298 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1302 // Create a new virtual register for the spill interval.
1303 // Create the new register now so we can map the fold instruction
1304 // to the new register so when it is unfolded we get the correct
1306 bool CreatedNewVReg = false;
1308 NewVReg = mri_->createVirtualRegister(rc);
1310 CreatedNewVReg = true;
1316 // Do not fold load / store here if we are splitting. We'll find an
1317 // optimal point to insert a load / store later.
1319 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1320 Ops, FoldSS, FoldSlot, NewVReg)) {
1321 // Folding the load/store can completely change the instruction in
1322 // unpredictable ways, rescan it from the beginning.
1325 // We need to give the new vreg the same stack slot as the
1326 // spilled interval.
1327 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1333 if (isRemoved(MI)) {
1337 goto RestartInstruction;
1340 // We'll try to fold it later if it's profitable.
1341 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and all collected sibling operands) to NewVReg;
// implicit operands may require fixing up remat'ed instructions too.
1345 mop.setReg(NewVReg);
1346 if (mop.isImplicit())
1347 rewriteImplicitOps(li, MI, NewVReg, vrm);
1349 // Reuse NewVReg for other reads.
1350 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1351 MachineOperand &mopj = MI->getOperand(Ops[j]);
1352 mopj.setReg(NewVReg);
1353 if (mopj.isImplicit())
1354 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Bookkeeping for a freshly created vreg: record remat info and/or assign
// it the interval's stack slot.
1357 if (CreatedNewVReg) {
1359 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1360 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1361 // Each valnum may have its own remat id.
1362 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1364 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1366 if (!CanDelete || (HasUse && HasDef)) {
1367 // If this is a two-addr instruction then its use operands are
1368 // rematerializable but its def is not. It should be assigned a
1370 vrm.assignVirt2StackSlot(NewVReg, Slot);
1373 vrm.assignVirt2StackSlot(NewVReg, Slot);
1375 } else if (HasUse && HasDef &&
1376 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1377 // If this interval hasn't been assigned a stack slot (because earlier
1378 // def is a deleted remat def), do it now.
1379 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1380 vrm.assignVirt2StackSlot(NewVReg, Slot);
1383 // Re-matting an instruction with virtual register use. Add the
1384 // register as an implicit use on the use MI.
1385 if (DefIsReMat && ImpUse)
1386 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1388 // create a new register interval for this spill / remat.
1389 LiveInterval &nI = getOrCreateInterval(NewVReg);
1390 if (CreatedNewVReg) {
1391 NewLIs.push_back(&nI);
1392 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1394 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build/extend the live ranges of the new interval around this def/use.
1398 if (CreatedNewVReg) {
1399 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1400 nI.getNextValue(~0U, 0, VNInfoAllocator));
1404 // Extend the split live interval to this def / use.
1405 unsigned End = getUseIndex(index)+1;
1406 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1407 nI.getValNumInfo(nI.getNumValNums()-1));
1413 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1414 nI.getNextValue(~0U, 0, VNInfoAllocator));
1419 DOUT << "\t\t\t\tAdded new interval: ";
1420 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Returns whether the given val# (VNI, a parameter on
// an elided line) has a kill inside MBB strictly after index Idx and before
// the block's end index (return statements elided in this listing).
1425 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1427 MachineBasicBlock *MBB, unsigned Idx) const {
1428 unsigned End = getMBBEndIdx(MBB);
1429 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1430 unsigned KillIdx = VNI->kills[j];
// Kill must fall within (Idx, End) to count as "in this MBB after Idx".
1431 if (KillIdx > Idx && KillIdx < End)
1437 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1438 /// during spilling.
// Field declarations (Index, MI, HasUse, HasDef) are on elided lines; the
// constructor below shows their order and meaning.
1440 struct RewriteInfo {
1445 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1446 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering on instruction index, used to sort RewriteInfo
// entries before rewriting (see std::sort call in rewriteInstructionsForSpills).
1449 struct RewriteInfoCompare {
1450 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1451 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs/uses of li.reg inside one
// live range [I->start, I->end): collects them sorted by instruction index,
// rewrites each instruction via rewriteInstructionForSpills, and records
// per-MBB spill (SpillMBBs/SpillIdxes) and restore (RestoreMBBs/RestoreIdxes)
// points for the split/spill code inserted later by addIntervalsForSpills.
// NOTE(review): this listing is elided; several original lines are missing
// between the numbered lines — comments describe only the visible code.
1456 void LiveIntervals::
1457 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1458 LiveInterval::Ranges::const_iterator &I,
1459 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1460 unsigned Slot, int LdSlot,
1461 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1463 const TargetRegisterClass* rc,
1464 SmallVector<int, 4> &ReMatIds,
1465 const MachineLoopInfo *loopInfo,
1466 BitVector &SpillMBBs,
1467 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1468 BitVector &RestoreMBBs,
1469 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1470 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1471 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1472 bool AllCanFold = true;
1473 unsigned NewVReg = 0;
1474 unsigned start = getBaseIndex(I->start);
1475 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1477 // First collect all the def / use in this live range that will be rewritten.
1478 // Make sure they are sorted according to instruction index.
1479 std::vector<RewriteInfo> RewriteMIs;
1480 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1481 re = mri_->reg_end(); ri != re; ) {
1482 MachineInstr *MI = &*ri;
1483 MachineOperand &O = ri.getOperand();
1485 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1486 unsigned index = getInstructionIndex(MI);
// Skip instructions outside this live range.
1487 if (index < start || index >= end)
1489 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1490 // Must be defined by an implicit def. It should not be spilled. Note,
1491 // this is for correctness reason. e.g.
1492 // 8 %reg1024<def> = IMPLICIT_DEF
1493 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1494 // The live range [12, 14) are not part of the r1024 live interval since
1495 // it's defined by an implicit def. It will not conflicts with live
1496 // interval of r1025. Now suppose both registers are spilled, you can
1497 // easily see a situation where both registers are reloaded before
1498 // the INSERT_SUBREG and both target registers that would overlap.
1500 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1502 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// The implicit-use vreg of the remat def (if any); instructions that use it
// get their weight pinned below so it is never spilled.
1504 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1505 // Now rewrite the defs and uses.
1506 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1507 RewriteInfo &rwi = RewriteMIs[i];
1509 unsigned index = rwi.Index;
1510 bool MIHasUse = rwi.HasUse;
1511 bool MIHasDef = rwi.HasDef;
1512 MachineInstr *MI = rwi.MI;
1513 // If MI def and/or use the same register multiple times, then there
1514 // are multiple entries.
1515 unsigned NumUses = MIHasUse;
// Coalesce consecutive entries for the same instruction into one rewrite.
1516 while (i != e && RewriteMIs[i].MI == MI) {
1517 assert(RewriteMIs[i].Index == index);
1518 bool isUse = RewriteMIs[i].HasUse;
1519 if (isUse) ++NumUses;
1521 MIHasDef |= RewriteMIs[i].HasDef;
1524 MachineBasicBlock *MBB = MI->getParent();
1526 if (ImpUse && MI != ReMatDefMI) {
1527 // Re-matting an instruction with virtual register use. Update the
1528 // register interval's spill weight to HUGE_VALF to prevent it from
1530 LiveInterval &ImpLi = getInterval(ImpUse);
1531 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the vreg already chosen for this MBB if any.
1534 unsigned MBBId = MBB->getNumber();
1535 unsigned ThisVReg = 0;
1537 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1538 if (NVI != MBBVRegsMap.end()) {
1539 ThisVReg = NVI->second;
1546 // It's better to start a new interval to avoid artifically
1547 // extend the new interval.
1548 if (MIHasDef && !MIHasUse) {
1549 MBBVRegsMap.erase(MBB->getNumber());
1555 bool IsNew = ThisVReg == 0;
1557 // This ends the previous live interval. If all of its def / use
1558 // can be folded, give it a low spill weight.
1559 if (NewVReg && TrySplit && AllCanFold) {
1560 LiveInterval &nI = getOrCreateInterval(NewVReg);
1567 bool HasDef = false;
1568 bool HasUse = false;
1569 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1570 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1571 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1572 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1573 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1574 if (!HasDef && !HasUse)
1577 AllCanFold &= CanFold;
1579 // Update weight of spill interval.
1580 LiveInterval &nI = getOrCreateInterval(NewVReg);
1582 // The spill weight is now infinity as it cannot be spilled again.
1583 nI.weight = HUGE_VALF;
1587 // Keep track of the last def and first use in each MBB.
1589 if (MI != ReMatOrigDefMI || !CanDelete) {
1590 bool HasKill = false;
1592 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1594 // If this is a two-address code, then this index starts a new VNInfo.
1595 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1597 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or amend) this MBB's pending spill point for NewVReg.
1599 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1600 SpillIdxes.find(MBBId);
1602 if (SII == SpillIdxes.end()) {
1603 std::vector<SRInfo> S;
1604 S.push_back(SRInfo(index, NewVReg, true));
1605 SpillIdxes.insert(std::make_pair(MBBId, S));
1606 } else if (SII->second.back().vreg != NewVReg) {
1607 SII->second.push_back(SRInfo(index, NewVReg, true));
1608 } else if ((int)index > SII->second.back().index) {
1609 // If there is an earlier def and this is a two-address
1610 // instruction, then it's not possible to fold the store (which
1611 // would also fold the load).
1612 SRInfo &Info = SII->second.back();
1614 Info.canFold = !HasUse;
1616 SpillMBBs.set(MBBId);
1617 } else if (SII != SpillIdxes.end() &&
1618 SII->second.back().vreg == NewVReg &&
1619 (int)index > SII->second.back().index) {
1620 // There is an earlier def that's not killed (must be two-address).
1621 // The spill is no longer needed.
1622 SII->second.pop_back();
1623 if (SII->second.empty()) {
1624 SpillIdxes.erase(MBBId);
1625 SpillMBBs.reset(MBBId);
// Use side: mark earlier spill/restore entries unfoldable when this use
// follows them, and record a restore point if none exists yet.
1632 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1633 SpillIdxes.find(MBBId);
1634 if (SII != SpillIdxes.end() &&
1635 SII->second.back().vreg == NewVReg &&
1636 (int)index > SII->second.back().index)
1637 // Use(s) following the last def, it's not safe to fold the spill.
1638 SII->second.back().canFold = false;
1639 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1640 RestoreIdxes.find(MBBId);
1641 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1642 // If we are splitting live intervals, only fold if it's the first
1643 // use and there isn't another use later in the MBB.
1644 RII->second.back().canFold = false;
1646 // Only need a reload if there isn't an earlier def / use.
1647 if (RII == RestoreIdxes.end()) {
1648 std::vector<SRInfo> Infos;
1649 Infos.push_back(SRInfo(index, NewVReg, true));
1650 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1652 RII->second.push_back(SRInfo(index, NewVReg, true));
1654 RestoreMBBs.set(MBBId);
1658 // Update spill weight.
1659 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1660 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1663 if (NewVReg && TrySplit && AllCanFold) {
1664 // If all of its def / use can be folded, give it a low spill weight.
1665 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Returns whether MBB Id has a recorded, still-foldable
// restore of vreg vr at the given instruction index (return lines elided).
1670 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1671 BitVector &RestoreMBBs,
1672 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1673 if (!RestoreMBBs[Id])
1675 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan for a matching (index, vreg) entry that is marked foldable.
1676 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1677 if (Restores[i].index == index &&
1678 Restores[i].vreg == vr &&
1679 Restores[i].canFold)
// eraseRestoreInfo - Invalidate (index = -1) restore entries in MBB Id that
// match the given instruction index, so no reload is issued for them.
1684 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1685 BitVector &RestoreMBBs,
1686 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1687 if (!RestoreMBBs[Id])
1689 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1690 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this tests Restores[i].vreg for non-zero instead of
// comparing it against the vr parameter (vr is otherwise unused here);
// sibling alsoFoldARestore uses `vreg == vr`. Suspected typo — confirm
// intended behavior before changing.
1691 if (Restores[i].index == index && Restores[i].vreg)
1692 Restores[i].index = -1;
1695 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1696 /// spilled and create empty intervals for their uses.
1698 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1699 const TargetRegisterClass* rc,
1700 std::vector<LiveInterval*> &NewLIs) {
// Walk remaining references to li.reg; at this point any surviving def must
// be an IMPLICIT_DEF (asserted below) and is simply erased.
1701 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1702 re = mri_->reg_end(); ri != re; ) {
1703 MachineOperand &O = ri.getOperand();
1704 MachineInstr *MI = &*ri;
1707 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1708 "Register def was not rewritten?");
1709 RemoveMachineInstrFromMaps(MI);
1710 vrm.RemoveMachineInstrFromMaps(MI);
1711 MI->eraseFromParent();
1713 // This must be an use of an implicit_def so it's not part of the live
1714 // interval. Create a new empty live interval for it.
1715 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1716 unsigned NewVReg = mri_->createVirtualRegister(rc);
1718 vrm.setIsImplicitlyDefined(NewVReg);
1719 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of the using instruction that still names li.reg.
1720 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1721 MachineOperand &MO = MI->getOperand(i);
1722 if (MO.isReg() && MO.getReg() == li.reg)
// Orders live intervals by ascending start number; used to sort the
// intervals returned by addIntervalsForSpillsFast (enclosing struct header
// is on an elided line).
1731 bool operator()(LiveInterval* A, LiveInterval* B) {
1732 return A->beginNumber() < B->beginNumber();
// addIntervalsForSpillsFast - Simple spiller path (enabled by -fast-spill):
// assigns li one stack slot, then for each instruction touching li.reg either
// folds the slot into the instruction or rewrites it to a fresh unspillable
// vreg with a tiny reload/spill interval around that instruction. Returns
// the new intervals sorted by start index and accumulates SSWeight.
// NOTE(review): listing is elided; some lines (returns, braces, else-arms)
// are missing between the numbered lines.
1737 std::vector<LiveInterval*> LiveIntervals::
1738 addIntervalsForSpillsFast(const LiveInterval &li,
1739 const MachineLoopInfo *loopInfo,
1740 VirtRegMap &vrm, float& SSWeight) {
1741 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1743 std::vector<LiveInterval*> added;
1745 assert(li.weight != HUGE_VALF &&
1746 "attempt to spill already spilled interval!");
1748 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1752 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1756 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1757 while (RI != mri_->reg_end()) {
1758 MachineInstr* MI = &*RI;
// Collect all operand indexes of MI that reference the spilled register.
1760 SmallVector<unsigned, 2> Indices;
1761 bool HasUse = false;
1762 bool HasDef = false;
1764 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1765 MachineOperand& mop = MI->getOperand(i);
1766 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1768 HasUse |= MI->getOperand(i).isUse();
1769 HasDef |= MI->getOperand(i).isDef();
1771 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; fall back to a new vreg.
1774 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1775 Indices, true, slot, li.reg)) {
1776 unsigned NewVReg = mri_->createVirtualRegister(rc);
1778 vrm.assignVirt2StackSlot(NewVReg, slot);
1780 // create a new register for this spill
1781 LiveInterval &nI = getOrCreateInterval(NewVReg);
1783 // the spill weight is now infinity as it
1784 // cannot be spilled again
1785 nI.weight = HUGE_VALF;
1787 // Rewrite register operands to use the new vreg.
1788 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1789 E = Indices.end(); I != E; ++I) {
1790 MI->getOperand(*I).setReg(NewVReg);
// The new interval dies at this instruction; mark uses as kills.
1792 if (MI->getOperand(*I).isUse())
1793 MI->getOperand(*I).setIsKill(true);
1796 // Fill in the new live interval.
1797 unsigned index = getInstructionIndex(MI);
// Use side: live from the reload point to the use slot of MI.
1799 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1800 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1803 vrm.addRestorePoint(NewVReg, MI);
// Def side: live from MI's def slot to the store (spill) point.
1806 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1807 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1810 vrm.addSpillPoint(NewVReg, true, MI);
1813 added.push_back(&nI);
1815 DOUT << "\t\t\t\tadded new interval: ";
// Account the reload/spill cost by def/use kind and loop depth.
1819 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1822 SSWeight += getSpillWeight(true, true, loopDepth);
1824 SSWeight += getSpillWeight(false, true, loopDepth);
1826 SSWeight += getSpillWeight(true, false, loopDepth);
// Iterator may have been invalidated by rewrites; restart from the head.
1830 RI = mri_->reg_begin(li.reg);
1833 // Clients expect the new intervals to be returned in sorted order.
1834 std::sort(added.begin(), added.end(), LISorter());
1839 std::vector<LiveInterval*> LiveIntervals::
1840 addIntervalsForSpills(const LiveInterval &li,
1841 SmallVectorImpl<LiveInterval*> &SpillIs,
1842 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1845 if (EnableFastSpilling)
1846 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1848 assert(li.weight != HUGE_VALF &&
1849 "attempt to spill already spilled interval!");
1851 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1852 li.print(DOUT, tri_);
1855 // Spill slot weight.
1858 // Each bit specify whether a spill is required in the MBB.
1859 BitVector SpillMBBs(mf_->getNumBlockIDs());
1860 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1861 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1862 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1863 DenseMap<unsigned,unsigned> MBBVRegsMap;
1864 std::vector<LiveInterval*> NewLIs;
1865 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1867 unsigned NumValNums = li.getNumValNums();
1868 SmallVector<MachineInstr*, 4> ReMatDefs;
1869 ReMatDefs.resize(NumValNums, NULL);
1870 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1871 ReMatOrigDefs.resize(NumValNums, NULL);
1872 SmallVector<int, 4> ReMatIds;
1873 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1874 BitVector ReMatDelete(NumValNums);
1875 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1877 // Spilling a split live interval. It cannot be split any further. Also,
1878 // it's also guaranteed to be a single val# / range interval.
1879 if (vrm.getPreSplitReg(li.reg)) {
1880 vrm.setIsSplitFromReg(li.reg, 0);
1881 // Unset the split kill marker on the last use.
1882 unsigned KillIdx = vrm.getKillPoint(li.reg);
1884 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1885 assert(KillMI && "Last use disappeared?");
1886 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1887 assert(KillOp != -1 && "Last use disappeared?");
1888 KillMI->getOperand(KillOp).setIsKill(false);
1890 vrm.removeKillPoint(li.reg);
1891 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1892 Slot = vrm.getStackSlot(li.reg);
1893 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1894 MachineInstr *ReMatDefMI = DefIsReMat ?
1895 vrm.getReMaterializedMI(li.reg) : NULL;
1897 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1898 bool isLoad = isLoadSS ||
1899 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1900 bool IsFirstRange = true;
1901 for (LiveInterval::Ranges::const_iterator
1902 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1903 // If this is a split live interval with multiple ranges, it means there
1904 // are two-address instructions that re-defined the value. Only the
1905 // first def can be rematerialized!
1907 // Note ReMatOrigDefMI has already been deleted.
1908 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1909 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1910 false, vrm, rc, ReMatIds, loopInfo,
1911 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1912 MBBVRegsMap, NewLIs, SSWeight);
1914 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1915 Slot, 0, false, false, false,
1916 false, vrm, rc, ReMatIds, loopInfo,
1917 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1918 MBBVRegsMap, NewLIs, SSWeight);
1920 IsFirstRange = false;
1923 SSWeight = 0.0f; // Already accounted for when split.
1924 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1928 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1929 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1933 bool NeedStackSlot = false;
1934 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1936 const VNInfo *VNI = *i;
1937 unsigned VN = VNI->id;
1938 unsigned DefIdx = VNI->def;
1940 continue; // Dead val#.
1941 // Is the def for the val# rematerializable?
1942 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1943 ? 0 : getInstructionFromIndex(DefIdx);
1945 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1946 // Remember how to remat the def of this val#.
1947 ReMatOrigDefs[VN] = ReMatDefMI;
1948 // Original def may be modified so we have to make a copy here.
1949 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1950 ClonedMIs.push_back(Clone);
1951 ReMatDefs[VN] = Clone;
1953 bool CanDelete = true;
1954 if (VNI->hasPHIKill) {
1955 // A kill is a phi node, not all of its uses can be rematerialized.
1956 // It must not be deleted.
1958 // Need a stack slot if there is any live range where uses cannot be
1960 NeedStackSlot = true;
1963 ReMatDelete.set(VN);
1965 // Need a stack slot if there is any live range where uses cannot be
1967 NeedStackSlot = true;
1971 // One stack slot per live interval.
1972 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1973 Slot = vrm.assignVirt2StackSlot(li.reg);
1975 // Create new intervals and rewrite defs and uses.
1976 for (LiveInterval::Ranges::const_iterator
1977 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1978 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1979 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1980 bool DefIsReMat = ReMatDefMI != NULL;
1981 bool CanDelete = ReMatDelete[I->valno->id];
1983 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1984 bool isLoad = isLoadSS ||
1985 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1986 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1987 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1988 CanDelete, vrm, rc, ReMatIds, loopInfo,
1989 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1990 MBBVRegsMap, NewLIs, SSWeight);
1993 // Insert spills / restores if we are splitting.
1995 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1999 SmallPtrSet<LiveInterval*, 4> AddedKill;
2000 SmallVector<unsigned, 2> Ops;
2001 if (NeedStackSlot) {
2002 int Id = SpillMBBs.find_first();
2004 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2005 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2006 std::vector<SRInfo> &spills = SpillIdxes[Id];
2007 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2008 int index = spills[i].index;
2009 unsigned VReg = spills[i].vreg;
2010 LiveInterval &nI = getOrCreateInterval(VReg);
2011 bool isReMat = vrm.isReMaterialized(VReg);
2012 MachineInstr *MI = getInstructionFromIndex(index);
2013 bool CanFold = false;
2014 bool FoundUse = false;
2016 if (spills[i].canFold) {
2018 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2019 MachineOperand &MO = MI->getOperand(j);
2020 if (!MO.isReg() || MO.getReg() != VReg)
2027 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2028 RestoreMBBs, RestoreIdxes))) {
2029 // MI has two-address uses of the same register. If the use
2030 // isn't the first and only use in the BB, then we can't fold
2031 // it. FIXME: Move this to rewriteInstructionsForSpills.
2038 // Fold the store into the def if possible.
2039 bool Folded = false;
2040 if (CanFold && !Ops.empty()) {
2041 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2044 // Also folded uses, do not issue a load.
2045 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2046 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2048 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2052 // Otherwise tell the spiller to issue a spill.
2054 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2055 bool isKill = LR->end == getStoreIndex(index);
2056 if (!MI->registerDefIsDead(nI.reg))
2057 // No need to spill a dead def.
2058 vrm.addSpillPoint(VReg, isKill, MI);
2060 AddedKill.insert(&nI);
2063 // Update spill slot weight.
2065 SSWeight += getSpillWeight(true, false, loopDepth);
2067 Id = SpillMBBs.find_next(Id);
2071 int Id = RestoreMBBs.find_first();
2073 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2074 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2076 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2077 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2078 int index = restores[i].index;
2081 unsigned VReg = restores[i].vreg;
2082 LiveInterval &nI = getOrCreateInterval(VReg);
2083 bool isReMat = vrm.isReMaterialized(VReg);
2084 MachineInstr *MI = getInstructionFromIndex(index);
2085 bool CanFold = false;
2087 if (restores[i].canFold) {
2089 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2090 MachineOperand &MO = MI->getOperand(j);
2091 if (!MO.isReg() || MO.getReg() != VReg)
2095 // If this restore were to be folded, it would have been folded
2104 // Fold the load into the use if possible.
2105 bool Folded = false;
2106 if (CanFold && !Ops.empty()) {
2108 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2110 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2112 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2113 // If the rematerializable def is a load, also try to fold it.
2114 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2115 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2116 Ops, isLoadSS, LdSlot, VReg);
2118 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2120 // Re-matting an instruction with virtual register use. Add the
2121 // register as an implicit use on the use MI and update the register
2122 // interval's spill weight to HUGE_VALF to prevent it from being
2124 LiveInterval &ImpLi = getInterval(ImpUse);
2125 ImpLi.weight = HUGE_VALF;
2126 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2131 // If folding is not possible / failed, then tell the spiller to issue a
2132 // load / rematerialization for us.
2134 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2136 vrm.addRestorePoint(VReg, MI);
2138 // Update spill slot weight.
2140 SSWeight += getSpillWeight(false, true, loopDepth);
2142 Id = RestoreMBBs.find_next(Id);
2145 // Finalize intervals: add kills, finalize spill weights, and filter out
2147 std::vector<LiveInterval*> RetNewLIs;
2148 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2149 LiveInterval *LI = NewLIs[i];
2151 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2152 if (!AddedKill.count(LI)) {
2153 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2154 unsigned LastUseIdx = getBaseIndex(LR->end);
2155 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2156 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2157 assert(UseIdx != -1);
2158 if (LastUse->getOperand(UseIdx).isImplicit() ||
2159 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2160 LastUse->getOperand(UseIdx).setIsKill();
2161 vrm.addKillPoint(LI->reg, LastUseIdx);
2164 RetNewLIs.push_back(LI);
2168 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2172 /// hasAllocatableSuperReg - Return true if the specified physical register has
2173 /// any super register that's allocatable.
2174 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2175 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2176 if (allocatableRegs_[*AS] && hasInterval(*AS))
2181 /// getRepresentativeReg - Find the largest super register of the specified
2182 /// physical register.
2183 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2184 // Find the largest super-register that is allocatable.
2185 unsigned BestReg = Reg;
2186 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2187 unsigned SuperReg = *AS;
2188 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2196 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2197 /// specified interval that conflicts with the specified physical register.
2198 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2199 unsigned PhysReg) const {
2200 unsigned NumConflicts = 0;
2201 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2202 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2203 E = mri_->reg_end(); I != E; ++I) {
2204 MachineOperand &O = I.getOperand();
2205 MachineInstr *MI = O.getParent();
2206 unsigned Index = getInstructionIndex(MI);
2207 if (pli.liveAt(Index))
2210 return NumConflicts;
2213 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2214 /// around all defs and uses of the specified interval.
2215 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2216 unsigned PhysReg, VirtRegMap &vrm) {
2217 unsigned SpillReg = getRepresentativeReg(PhysReg);
2219 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2220 // If there are registers which alias PhysReg, but which are not a
2221 // sub-register of the chosen representative super register. Assert
2222 // since we can't handle it yet.
2223 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2224 tri_->isSuperRegister(*AS, SpillReg));
2226 LiveInterval &pli = getInterval(SpillReg);
2227 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2228 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2229 E = mri_->reg_end(); I != E; ++I) {
2230 MachineOperand &O = I.getOperand();
2231 MachineInstr *MI = O.getParent();
2232 if (SeenMIs.count(MI))
2235 unsigned Index = getInstructionIndex(MI);
2236 if (pli.liveAt(Index)) {
2237 vrm.addEmergencySpill(SpillReg, MI);
2238 unsigned StartIdx = getLoadIndex(Index);
2239 unsigned EndIdx = getStoreIndex(Index)+1;
2240 if (pli.isInOneLiveRange(StartIdx, EndIdx))
2241 pli.removeRange(StartIdx, EndIdx);
2243 cerr << "Ran out of registers during register allocation!\n";
2244 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2245 cerr << "Please check your inline asm statement for invalid "
2246 << "constraints:\n";
2247 MI->print(cerr.stream(), tm_);
2251 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2252 if (!hasInterval(*AS))
2254 LiveInterval &spli = getInterval(*AS);
2255 if (spli.liveAt(Index))
2256 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2262 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2263 MachineInstr* startInst) {
2264 LiveInterval& Interval = getOrCreateInterval(reg);
2265 VNInfo* VN = Interval.getNextValue(
2266 getInstructionIndex(startInst) + InstrSlots::DEF,
2267 startInst, getVNInfoAllocator());
2268 VN->hasPHIKill = true;
2269 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2270 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2271 getMBBEndIdx(startInst->getParent()) + 1, VN);
2272 Interval.addRange(LR);