//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
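  // Note on the numbering scheme: every instruction is assigned
  // InstrSlots::NUM consecutive index values, and the offset within that
  // group (the LOAD/USE/DEF/STORE sub-slots) identifies a position relative
  // to the instruction.  That is why the remapping code below recovers an
  // instruction with index / InstrSlots::NUM and its sub-slot with
  // index % InstrSlots::NUM.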
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
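
  // If there was an old numbering, walk every interval and remap its live
  // ranges and value numbers from the old instruction indices to the ones
  // just computed, using the old index -> instruction map as the bridge.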
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above.  VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
         mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
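
/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by operand MO of the instruction at index MIIdx,
/// creating new value numbers for the redefinitions introduced by PHI
/// elimination and two-address lowering.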
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // It cannot be an early clobber MO.
      assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      } else {
        // In the case of PHI elimination, each variable definition is only
        // live until the end of the block.  We've already taken care of the
        // rest of the live range.
        unsigned defIndex = getDefIndex(MIIdx);
        // It cannot be an early clobber MO.
        assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

        VNInfo *ValNo;
        MachineInstr *CopyMI = NULL;
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
            mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
            tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
          CopyMI = mi;
        ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

        unsigned killIndex = getMBBEndIdx(mbb) + 1;
        LiveRange LR(defIndex, killIndex, ValNo);
        interval.addRange(LR);
        interval.addKill(ValNo, killIndex);
        ValNo->hasPHIKill = true;
      }
    }
  }
}
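
/// handlePhysicalRegisterDef - Update the interval for the physical register
/// defined at MIIdx.  A physical register's live range never extends past its
/// defining block, so the range ends at the first kill, at the next
/// redefinition, or at the def slot itself if the value is dead.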
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
    }
    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
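
/// handleRegisterDef - Update intervals for the register defined by operand
/// MOIdx of MI: virtual registers go through handleVirtualRegisterDef, while
/// allocatable physical registers (and their sub-registers) go through
/// handlePhysicalRegisterDef.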
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
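
/// handleLiveInRegister - Create an interval for a register that is live
/// into MBB (or, when isAlias is set, for an alias of a live-in register).
/// The range starts at the top of the block and runs to the first kill or
/// redefinition found in the block, if any.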
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Computes the live intervals for virtual
/// registers.  For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
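
/// findLiveInMBBs - Collect into MBBs the basic blocks whose start indices
/// fall inside [Start, End); returns true if any were found.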
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                                   SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                                      SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
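  // Physical registers are given an effectively infinite spill weight so the
  // allocator never tries to spill them; virtual registers start at weight 0.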
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // tests.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }
        // For the use, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
         re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }

  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
      return false;

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands.  Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
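  // MRInfo accumulates VirtRegMap::isMod / isRef flags describing how the
  // instruction touches the register; FoldOps receives the operand indices
  // that are actually eligible for folding.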
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created MI
/// and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);

    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
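
  // Walk MI's operands: each operand that refers to li.reg is rewritten to a
  // (possibly freshly created) virtual register NewVReg, HasDef/HasUse record
  // how the instruction touches the register, and CanFold tracks whether the
  // access could instead be folded into a memory operand.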
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DOUT << "\t\t\t\tErasing re-materializable def: ";
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      break;
    }

    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));

    // Try fold loads (from stack slot, constant pool, etc.) into uses.

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
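    // (The weight returned typically grows with the surrounding loop depth,
    // so references inside deep loops are considered more expensive to spill.)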
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                               Ops, FoldSS, FoldSlot, NewVReg)) {
        // Folding the load/store can completely change the instruction in
        // unpredictable ways, rescan it from the beginning.
        // We need to give the new vreg the same stack slot as the
        // spilled interval.
        vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
        if (isRemoved(MI)) {
          break;
        }
        goto RestartInstruction;
      }
      // We'll try to fold it later if it's profitable.
      CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      nI.addRange(LR);
    }
  }

  DOUT << "\t\t\t\tAdded new interval: ";
  nI.print(DOUT, tri_);
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
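  // For the live range [I->start, I->end), rewrite every def and use of
  // li.reg, creating at most one new virtual register per basic block, and
  // record in SpillIdxes / RestoreIdxes the points where the caller must
  // later insert spills and reloads.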
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten.  Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
      // It's better to start a new interval to avoid artificially
      // extending the new interval.
      if (MIHasDef && !MIHasUse) {
        MBBVRegsMap.erase(MBB->getNumber());
        ThisVReg = 0;
      }
    }

    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its defs / uses
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
      } else
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
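  // Fast spilling path: assign a single stack slot up front, then rewrite
  // every instruction that touches li.reg to use a fresh virtual register,
  // recording a restore point before uses and a spill point after defs
  // instead of building precise per-range information.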
1743 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1745 std::vector<LiveInterval*> added;
1747 assert(li.weight != HUGE_VALF &&
1748 "attempt to spill already spilled interval!");
1750 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1754 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1758 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1759 while (RI != mri_->reg_end()) {
1760 MachineInstr* MI = &*RI;
1762 SmallVector<unsigned, 2> Indices;
1763 bool HasUse = false;
1764 bool HasDef = false;
1766 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1767 MachineOperand& mop = MI->getOperand(i);
1768 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1770 HasUse |= MI->getOperand(i).isUse();
1771 HasDef |= MI->getOperand(i).isDef();
1773 Indices.push_back(i);
1776 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1777 Indices, true, slot, li.reg)) {
1778 unsigned NewVReg = mri_->createVirtualRegister(rc);
1780 vrm.assignVirt2StackSlot(NewVReg, slot);
1782 // create a new register for this spill
1783 LiveInterval &nI = getOrCreateInterval(NewVReg);
1785 // the spill weight is now infinity as it
1786 // cannot be spilled again
1787 nI.weight = HUGE_VALF;
1789 // Rewrite register operands to use the new vreg.
1790 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1791 E = Indices.end(); I != E; ++I) {
1792 MI->getOperand(*I).setReg(NewVReg);
1794 if (MI->getOperand(*I).isUse())
1795 MI->getOperand(*I).setIsKill(true);
1798 // Fill in the new live interval.
1799 unsigned index = getInstructionIndex(MI);
1801 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1802 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1805 vrm.addRestorePoint(NewVReg, MI);
1808 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1809 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1812 vrm.addSpillPoint(NewVReg, true, MI);
1815 added.push_back(&nI);
1817 DOUT << "\t\t\t\tadded new interval: ";
1821 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse && HasDef)
        SSWeight += getSpillWeight(true, true, loopDepth);
      else if (HasUse)
        SSWeight += getSpillWeight(false, true, loopDepth);
      else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }
1835 // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());
  return added;
}
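/// addIntervalsForSpills - Create new intervals for spilled defs / uses of
/// the given interval. The weight of any spill slot that gets created is
/// accumulated into SSWeight.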
1841 std::vector<LiveInterval*> LiveIntervals::
1842 addIntervalsForSpills(const LiveInterval &li,
1843 SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
1847 if (EnableFastSpilling)
1848 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1850 assert(li.weight != HUGE_VALF &&
1851 "attempt to spill already spilled interval!");
1853 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1854 li.print(DOUT, tri_);
  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the corresponding MBB.
1861 BitVector SpillMBBs(mf_->getNumBlockIDs());
1862 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1863 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1864 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1865 DenseMap<unsigned,unsigned> MBBVRegsMap;
1866 std::vector<LiveInterval*> NewLIs;
1867 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1869 unsigned NumValNums = li.getNumValNums();
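  // Per-val# rematerialization state: the cloned def to rematerialize from,
  // the original def, the remat id (if one is assigned during rewriting), and
  // whether the original def may be deleted once its uses are rewritten.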
1870 SmallVector<MachineInstr*, 4> ReMatDefs;
1871 ReMatDefs.resize(NumValNums, NULL);
1872 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1873 ReMatOrigDefs.resize(NumValNums, NULL);
1874 SmallVector<int, 4> ReMatIds;
1875 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1876 BitVector ReMatDelete(NumValNums);
1877 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further, and it is
  // guaranteed to be a single val# / range interval.
1881 if (vrm.getPreSplitReg(li.reg)) {
1882 vrm.setIsSplitFromReg(li.reg, 0);
1883 // Unset the split kill marker on the last use.
1884 unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1887 assert(KillMI && "Last use disappeared?");
1888 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1889 assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
1893 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1894 Slot = vrm.getStackSlot(li.reg);
1895 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1896 MachineInstr *ReMatDefMI = DefIsReMat ?
1897 vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1900 bool isLoad = isLoadSS ||
1901 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1902 bool IsFirstRange = true;
1903 for (LiveInterval::Ranges::const_iterator
1904 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1905 // If this is a split live interval with multiple ranges, it means there
1906 // are two-address instructions that re-defined the value. Only the
1907 // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
1910 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1911 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1912 false, vrm, rc, ReMatIds, loopInfo,
1913 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1914 MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1917 Slot, 0, false, false, false,
1918 false, vrm, rc, ReMatIds, loopInfo,
1919 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1920 MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f; // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
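  // Splitting is only attempted when the interval spans more than one basic
  // block and the -split-limit budget (if set) has not been exhausted.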
1930 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
1935 bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
1938 const VNInfo *VNI = *i;
1939 unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
1943 // Is the def for the val# rematerializable?
1944 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1945 ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1948 // Remember how to remat the def of this val#.
1949 ReMatOrigDefs[VN] = ReMatDefMI;
1950 // Original def may be modified so we have to make a copy here.
1951 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1952 ClonedMIs.push_back(Clone);
1953 ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
1973 // One stack slot per live interval.
1974 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1975 Slot = vrm.assignVirt2StackSlot(li.reg);
1977 // Create new intervals and rewrite defs and uses.
1978 for (LiveInterval::Ranges::const_iterator
1979 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1980 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1981 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1982 bool DefIsReMat = ReMatDefMI != NULL;
1983 bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1986 bool isLoad = isLoadSS ||
1987 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1988 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1989 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1990 CanDelete, vrm, rc, ReMatIds, loopInfo,
1991 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs, SSWeight);
  }
  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
2001 SmallPtrSet<LiveInterval*, 4> AddedKill;
2002 SmallVector<unsigned, 2> Ops;
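  // Second pass: for each basic block that needs a spill or a restore, either
  // fold the memory access into an existing instruction or record an explicit
  // spill / restore point for the spiller.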
2003 if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
2006 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2007 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2008 std::vector<SRInfo> &spills = SpillIdxes[Id];
2009 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2010 int index = spills[i].index;
2011 unsigned VReg = spills[i].vreg;
2012 LiveInterval &nI = getOrCreateInterval(VReg);
2013 bool isReMat = vrm.isReMaterialized(VReg);
2014 MachineInstr *MI = getInstructionFromIndex(index);
2015 bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
2020 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2021 MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;
2029 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2030 RestoreMBBs, RestoreIdxes))) {
2031 // MI has two-address uses of the same register. If the use
2032 // isn't the first and only use in the BB, then we can't fold
2033 // it. FIXME: Move this to rewriteInstructionsForSpills.
2040 // Fold the store into the def if possible.
2041 bool Folded = false;
2042 if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2057 bool isKill = LR->end == getStoreIndex(index);
2058 if (!MI->registerDefIsDead(nI.reg))
2059 // No need to spill a dead def.
2060 vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
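  // Process the recorded restore points. Where possible, fold the reload (or
  // the rematerialized def, if it is itself a load) into the using
  // instruction; otherwise record a restore point for the spiller.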
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
2075 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2076 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2078 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2079 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
2083 unsigned VReg = restores[i].vreg;
2084 LiveInterval &nI = getOrCreateInterval(VReg);
2085 bool isReMat = vrm.isReMaterialized(VReg);
2086 MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
2091 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2092 MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }
2106 // Fold the load into the use if possible.
2107 bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with a virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
2149 std::vector<LiveInterval*> RetNewLIs;
2150 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
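      // Normalize the spill weight by the approximate size of the interval,
      // giving a per-instruction-slot weight.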
2153 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
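      // If the spiller has not already recorded a kill for this new interval,
      // mark its last use as a kill and remember the kill point.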
2154 if (!AddedKill.count(LI)) {
2155 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2156 unsigned LastUseIdx = getBaseIndex(LR->end);
2157 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2158 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2159 assert(UseIdx != -1);
2160 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2161 LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
2173 /// hasAllocatableSuperReg - Return true if the specified physical register has
2174 /// any super register that's allocatable.
2175 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2176 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
2182 /// getRepresentativeReg - Find the largest super register of the specified
2183 /// physical register.
2184 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2185 // Find the largest super-register that is allocatable.
2186 unsigned BestReg = Reg;
2187 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2188 unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
2197 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
2199 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2200 unsigned PhysReg) const {
2201 unsigned NumConflicts = 0;
2202 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2203 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2204 E = mri_->reg_end(); I != E; ++I) {
2205 MachineOperand &O = I.getOperand();
2206 MachineInstr *MI = O.getParent();
2207 unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
2214 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2215 /// around all defs and uses of the specified interval.
2216 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2217 unsigned PhysReg, VirtRegMap &vrm) {
2218 unsigned SpillReg = getRepresentativeReg(PhysReg);
2220 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register, assert since
    // we cannot handle it yet.
2224 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2225 tri_->isSuperRegister(*AS, SpillReg));
2227 LiveInterval &pli = getInterval(SpillReg);
2228 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2229 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2230 E = mri_->reg_end(); I != E; ++I) {
2231 MachineOperand &O = I.getOperand();
2232 MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
2236 unsigned Index = getInstructionIndex(MI);
2237 if (pli.liveAt(Index)) {
2238 vrm.addEmergencySpill(SpillReg, MI);
2239 unsigned StartIdx = getLoadIndex(Index);
2240 unsigned EndIdx = getStoreIndex(Index)+1;
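      // Each instruction index spans InstrSlots::NUM consecutive slots (load,
      // use, def, store), so [load index, store index + 1) covers the whole
      // instruction.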
2241 if (pli.isInOneLiveRange(StartIdx, EndIdx))
        pli.removeRange(StartIdx, EndIdx);
      else {
        cerr << "Ran out of registers during register allocation!\n";
2245 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2246 cerr << "Please check your inline asm statement for invalid "
2247 << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }

      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
2255 LiveInterval &spli = getInterval(*AS);
2256 if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
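/// addLiveRangeToEndOfBlock - Create a live range for the given register that
/// starts at the def by startInst and extends to the end of the defining
/// instruction's basic block.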
2263 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2264 MachineInstr* startInst) {
2265 LiveInterval& Interval = getOrCreateInterval(reg);
2266 VNInfo* VN = Interval.getNextValue(
2267 getInstructionIndex(startInst) + InstrSlots::DEF,
2268 startInst, getVNInfoAllocator());
2269 VN->hasPHIKill = true;
2270 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2271 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2272 getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}