1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// These hidden command-line switches toggle spilling/splitting/remat
// behavior while debugging the register allocator; they are not part of
// the user-facing interface.
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the PassManager.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare required and preserved analyses. LiveVariables
// and AliasAnalysis are both required and kept up to date; PHI elimination
// and two-address lowering must have run before this pass.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
72 AU.addPreservedID(PHIEliminationID);
73 AU.addRequiredID(PHIEliminationID);
76 AU.addRequiredID(TwoAddressInstructionPassID);
77 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the LiveInterval objects,
// the VNInfo allocator, and any instructions cloned for rematerialization.
80 void LiveIntervals::releaseMemory() {
81 // Free the live intervals themselves.
82 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
83 E = r2iMap_.end(); I != E; ++I)
91 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
92 VNInfoAllocator.Reset();
93 while (!ClonedMIs.empty()) {
94 MachineInstr *MI = ClonedMIs.back();
// Cloned instructions are owned by this pass and must be returned to
// the MachineFunction's allocator.
96 mf_->DeleteMachineInstr(MI);
// computeNumbering - Assign a dense numbering to every MachineInstr and
// MachineBasicBlock, leaving empty slots around instructions so new
// instructions can be inserted later without a full renumbering. If a
// previous numbering exists (OldI2MI non-empty), remap every live range
// start/end, VNInfo def, and kill index from the old numbering to the new.
100 void LiveIntervals::computeNumbering() {
// Save the previous instruction and block numberings so existing
// intervals can be translated into the new numbering below.
101 Index2MiMap OldI2MI = i2miMap_;
102 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
111 // Number MachineInstrs and MachineBasicBlocks.
112 // Initialize MBB indexes to a sentinel.
113 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
115 unsigned MIIndex = 0;
116 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
118 unsigned StartIdx = MIIndex;
120 // Insert an empty slot at the beginning of each block.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
124 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
126 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
127 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Idx2MBBMap must be sorted by start index so lower_bound lookups work.
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If this is a renumbering (not the first numbering), translate all
// existing interval data from the old index space to the new one.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
// A LOAD-slot start means the range begins at a block boundary;
// find the containing block in the old map and use its new start.
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
184 unsigned idx = index;
// Skip over slots whose old instruction has been erased; the range
// then ends at the next surviving instruction (offset dropped).
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the target description, register info, alias
/// analysis, and live variables analyses for use by the rest of the pass,
/// then computes the live intervals and updates the interval statistic.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
261 numIntervals += getNumIntervals();
267 /// print - Implement the dump method.
///
/// Writes every live interval followed by every machine instruction
/// (prefixed with its numbering index) to the given stream.
268 void LiveIntervals::print(std::ostream &O, const Module* ) const {
269 O << "********** INTERVALS **********\n";
270 for (const_iterator I = begin(), E = end(); I != E; ++I) {
271 I->second->print(O, tri_);
275 O << "********** MACHINEINSTRS **********\n";
276 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
277 mbbi != mbbe; ++mbbi) {
// Print the IR basic block's name as a label for this MBB.
278 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
279 for (MachineBasicBlock::iterator mii = mbbi->begin(),
280 mie = mbbi->end(); mii != mie; ++mii) {
281 O << getInstructionIndex(mii) << '\t' << *mii;
286 /// conflictsWithPhysRegDef - Returns true if the specified register
287 /// is defined during the duration of the specified interval.
///
/// Walks every instruction inside li's ranges; copies to/from li.reg are
/// skipped, and virtual register operands are mapped through the VirtRegMap
/// to the physical register they were assigned before the overlap check.
288 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
289 VirtRegMap &vrm, unsigned reg) {
290 for (LiveInterval::Ranges::const_iterator
291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
292 for (unsigned index = getBaseIndex(I->start),
293 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
294 index += InstrSlots::NUM) {
295 // skip deleted instructions
296 while (index != end && !getInstructionFromIndex(index))
297 index += InstrSlots::NUM;
298 if (index == end) break;
300 MachineInstr *MI = getInstructionFromIndex(index);
301 unsigned SrcReg, DstReg;
// Copies involving li.reg itself are coalescable, not conflicts.
302 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
303 if (SrcReg == li.reg || DstReg == li.reg)
305 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
306 MachineOperand& mop = MI->getOperand(i);
309 unsigned PhysReg = mop.getReg();
310 if (PhysReg == 0 || PhysReg == li.reg)
// Translate an assigned virtual register to its physical register.
312 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
313 if (!vrm.hasPhys(PhysReg))
315 PhysReg = vrm.getPhys(PhysReg);
317 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Print a register to cerr: the target's name for a
// physical register, or "%reg<N>" for a virtual register.
326 void LiveIntervals::printRegName(unsigned reg) const {
327 if (TargetRegisterInfo::isPhysicalRegister(reg))
328 cerr << tri_->getName(reg);
330 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a
// virtual register by operand MO of instruction mi (at index MIIdx).
// Three cases are handled: the first definition of the vreg (interval is
// empty), a redefinition by a two-address instruction, and a redefinition
// created by PHI elimination.
333 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
334 MachineBasicBlock::iterator mi,
335 unsigned MIIdx, MachineOperand& MO,
337 LiveInterval &interval) {
338 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
339 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
341 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
342 DOUT << "is a implicit_def\n";
346 // Virtual registers may be defined multiple times (due to phi
347 // elimination and 2-addr elimination). Much of what we do only has to be
348 // done once for the vreg. We use an empty interval to detect the first
349 // time we see a vreg.
350 if (interval.empty()) {
351 // Get the Idx of the defining instructions.
352 unsigned defIndex = getDefIndex(MIIdx);
353 // Earlyclobbers move back one.
354 if (MO.isEarlyClobber())
355 defIndex = getUseIndex(MIIdx);
// Record the defining copy (if any) so the coalescer/remat logic can
// later identify the value's source.
357 MachineInstr *CopyMI = NULL;
358 unsigned SrcReg, DstReg;
359 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
360 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
361 tii_->isMoveInstr(*mi, SrcReg, DstReg))
363 // Earlyclobbers move back one.
364 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
366 assert(ValNo->id == 0 && "First value in interval is not 0?");
368 // Loop over all of the blocks that the vreg is defined in. There are
369 // two cases we have to handle here. The most common case is a vreg
370 // whose lifetime is contained within a basic block. In this case there
371 // will be a single kill, in MBB, which comes after the definition.
372 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
373 // FIXME: what about dead vars?
375 if (vi.Kills[0] != mi)
376 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
378 killIdx = defIndex+1;
380 // If the kill happens after the definition, we have an intra-block
382 if (killIdx > defIndex) {
383 assert(vi.AliveBlocks.none() &&
384 "Shouldn't be alive across any blocks!");
385 LiveRange LR(defIndex, killIdx, ValNo);
386 interval.addRange(LR);
387 DOUT << " +" << LR << "\n";
388 interval.addKill(ValNo, killIdx);
393 // The other case we handle is when a virtual register lives to the end
394 // of the defining block, potentially live across some blocks, then is
395 // live into some number of blocks, but gets killed. Start by adding a
396 // range that goes from this definition to the end of the defining block.
397 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
398 DOUT << " +" << NewLR;
399 interval.addRange(NewLR);
401 // Iterate over all of the blocks that the variable is completely
402 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
404 for (int i = vi.AliveBlocks.find_first(); i != -1;
405 i = vi.AliveBlocks.find_next(i)) {
406 LiveRange LR(getMBBStartIdx(i),
407 getMBBEndIdx(i)+1, // MBB ends at -1.
409 interval.addRange(LR);
413 // Finally, this virtual register is live from the start of any killing
414 // block to the 'use' slot of the killing instruction.
415 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
416 MachineInstr *Kill = vi.Kills[i];
417 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
418 LiveRange LR(getMBBStartIdx(Kill->getParent()),
420 interval.addRange(LR);
421 interval.addKill(ValNo, killIdx);
426 // If this is the second time we see a virtual register definition, it
427 // must be due to phi elimination or two addr elimination. If this is
428 // the result of two address elimination, then the vreg is one of the
429 // def-and-use register operand.
430 if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
431 // If this is a two-address definition, then we have already processed
432 // the live range. The only problem is that we didn't realize there
433 // are actually two values in the live interval. Because of this we
434 // need to take the LiveRegion that defines this register and split it
436 assert(interval.containsOneValue());
437 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
438 unsigned RedefIndex = getDefIndex(MIIdx);
439 // It cannot be an early clobber MO.
440 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
442 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
443 VNInfo *OldValNo = OldLR->valno;
445 // Delete the initial value, which should be short and continuous,
446 // because the 2-addr copy must be in the same MBB as the redef.
447 interval.removeRange(DefIndex, RedefIndex);
449 // Two-address vregs should always only be redefined once. This means
450 // that at this point, there should be exactly one value number in it.
451 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
453 // The new value number (#1) is defined by the instruction we claimed
455 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
458 // Value#0 is now defined by the 2-addr instruction.
459 OldValNo->def = RedefIndex;
462 // Add the new live interval which replaces the range for the input copy.
463 LiveRange LR(DefIndex, RedefIndex, ValNo);
464 DOUT << " replace range with " << LR;
465 interval.addRange(LR);
466 interval.addKill(ValNo, RedefIndex);
468 // If this redefinition is dead, we need to add a dummy unit live
469 // range covering the def slot.
471 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
474 interval.print(DOUT, tri_);
477 // Otherwise, this must be because of phi elimination. If this is the
478 // first redefinition of the vreg that we have seen, go back and change
479 // the live range in the PHI block to be a different value number.
480 if (interval.containsOneValue()) {
481 assert(vi.Kills.size() == 1 &&
482 "PHI elimination vreg should have one kill, the PHI itself!");
484 // Remove the old range that we now know has an incorrect number.
485 VNInfo *VNI = interval.getValNumInfo(0);
486 MachineInstr *Killer = vi.Kills[0];
487 unsigned Start = getMBBStartIdx(Killer->getParent());
488 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
489 DOUT << " Removing [" << Start << "," << End << "] from: ";
490 interval.print(DOUT, tri_); DOUT << "\n";
491 interval.removeRange(Start, End);
492 VNI->hasPHIKill = true;
493 DOUT << " RESULT: "; interval.print(DOUT, tri_);
495 // Replace the interval with one of a NEW value number. Note that this
496 // value number isn't actually defined by an instruction, weird huh? :)
497 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
498 DOUT << " replace range with " << LR;
499 interval.addRange(LR);
500 interval.addKill(LR.valno, End);
501 DOUT << " RESULT: "; interval.print(DOUT, tri_);
504 // In the case of PHI elimination, each variable definition is only
505 // live until the end of the block. We've already taken care of the
506 // rest of the live range.
507 unsigned defIndex = getDefIndex(MIIdx);
508 // It cannot be an early clobber MO.
509 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
512 MachineInstr *CopyMI = NULL;
513 unsigned SrcReg, DstReg;
514 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
515 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
516 tii_->isMoveInstr(*mi, SrcReg, DstReg))
518 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// The PHI-eliminated def is live only to the end of its block; the
// kill is recorded at the block boundary.
520 unsigned killIndex = getMBBEndIdx(mbb) + 1;
521 LiveRange LR(defIndex, killIndex, ValNo);
522 interval.addRange(LR);
523 interval.addKill(ValNo, killIndex);
524 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend `interval` for a definition of a
// physical register. The interval is local to the defining block: it runs
// from the def slot to the kill/redefinition found by scanning forward, or
// is a one-slot dead range. CopyMI, if non-null, is the copy instruction
// that produced this value.
532 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
533 MachineBasicBlock::iterator mi,
536 LiveInterval &interval,
537 MachineInstr *CopyMI) {
538 // A physical register cannot be live across basic blocks, so its
539 // lifetime must end somewhere in its defining basic block.
540 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
542 unsigned baseIndex = MIIdx;
543 unsigned start = getDefIndex(baseIndex);
544 // Earlyclobbers move back one.
545 if (MO.isEarlyClobber())
546 start = getUseIndex(MIIdx);
547 unsigned end = start;
549 // If it is not used after definition, it is considered dead at
550 // the instruction defining it. Hence its interval is:
551 // [defSlot(def), defSlot(def)+1)
558 // If it is not dead on definition, it must be killed by a
559 // subsequent instruction. Hence its interval is:
560 // [defSlot(def), useSlot(kill)+1)
561 baseIndex += InstrSlots::NUM;
562 while (++mi != MBB->end()) {
// Keep baseIndex in step with mi by skipping empty numbering slots.
563 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
564 getInstructionFromIndex(baseIndex) == 0)
565 baseIndex += InstrSlots::NUM;
566 if (mi->killsRegister(interval.reg, tri_)) {
568 end = getUseIndex(baseIndex) + 1;
570 } else if (mi->modifiesRegister(interval.reg, tri_)) {
571 // Another instruction redefines the register before it is ever read.
572 // Then the register is essentially dead at the instruction that defines
573 // it. Hence its interval is:
574 // [defSlot(def), defSlot(def)+1)
580 baseIndex += InstrSlots::NUM;
583 // The only case we should have a dead physreg here without a killing or
584 // instruction where we know it's dead is if it is live-in to the function
586 assert(!CopyMI && "physreg was not killed in defining block!");
590 assert(start < end && "did not find end of interval?");
592 // Already exists? Extend old live interval.
593 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
594 bool Extend = OldLR != interval.end();
595 VNInfo *ValNo = Extend
596 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
// Remember that the existing value was clobbered by an early-clobber
// redefinition so downstream users can account for the overlap.
597 if (MO.isEarlyClobber() && Extend)
598 ValNo->redefByEC = true;
599 LiveRange LR(start, end, ValNo);
600 interval.addRange(LR);
601 interval.addKill(LR.valno, end);
602 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler. For an allocatable physical register this
// also creates intervals for its sub-registers, since defining a register
// implicitly defines them.
605 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
606 MachineBasicBlock::iterator MI,
610 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
611 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
612 getOrCreateInterval(MO.getReg()));
613 else if (allocatableRegs_[MO.getReg()]) {
// Identify a defining copy so the physreg handler can record it.
614 MachineInstr *CopyMI = NULL;
615 unsigned SrcReg, DstReg;
616 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
617 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
618 tii_->isMoveInstr(*MI, SrcReg, DstReg))
620 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
621 getOrCreateInterval(MO.getReg()), CopyMI);
622 // Def of a register also defines its sub-registers.
623 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
624 // If MI also modifies the sub-register explicitly, avoid processing it
625 // more than once. Do not pass in TRI here so it checks for exact match.
626 if (!MI->modifiesRegister(*AS))
627 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
628 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is
// live-in to MBB. The range starts at the block entry (MIIdx) and ends at
// the first kill or redefinition found by scanning the block; its value
// number uses the ~0U sentinel def since no instruction defines it here.
// isAlias marks ranges created for sub-registers aliasing a live-in.
632 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
634 LiveInterval &interval, bool isAlias) {
635 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
637 // Look for kills, if it reaches a def before it's killed, then it shouldn't
638 // be considered a livein.
639 MachineBasicBlock::iterator mi = MBB->begin();
640 unsigned baseIndex = MIIdx;
641 unsigned start = baseIndex;
// Advance past empty numbering slots to the first real instruction.
642 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
643 getInstructionFromIndex(baseIndex) == 0)
644 baseIndex += InstrSlots::NUM;
645 unsigned end = baseIndex;
647 while (mi != MBB->end()) {
648 if (mi->killsRegister(interval.reg, tri_)) {
650 end = getUseIndex(baseIndex) + 1;
652 } else if (mi->modifiesRegister(interval.reg, tri_)) {
653 // Another instruction redefines the register before it is ever read.
654 // Then the register is essentially dead at the instruction that defines
655 // it. Hence its interval is:
656 // [defSlot(def), defSlot(def)+1)
658 end = getDefIndex(start) + 1;
662 baseIndex += InstrSlots::NUM;
663 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
664 getInstructionFromIndex(baseIndex) == 0)
665 baseIndex += InstrSlots::NUM;
670 // Live-in register might not be used at all.
674 end = getDefIndex(MIIdx) + 1;
676 DOUT << " live through";
// ~0U def marks a value with no defining instruction (live-in).
681 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
682 interval.addRange(LR);
683 interval.addKill(LR.valno, end);
684 DOUT << " +" << LR << '\n';
687 /// computeIntervals - Computes the live intervals for virtual
688 /// registers. For some ordering of the machine instructions [1,N] a
689 /// live interval is an interval [i, j) where 1 <= i <= j < N for
690 /// which a variable is live.
691 void LiveIntervals::computeIntervals() {
693 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
694 << "********** Function: "
695 << ((Value*)mf_->getFunction())->getName() << '\n';
696 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
699 MachineBasicBlock *MBB = MBBI;
700 // Track the index of the current machine instr.
701 unsigned MIIndex = getMBBStartIdx(MBB);
702 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
704 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
706 // Create intervals for live-ins to this BB first.
707 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
708 LE = MBB->livein_end(); LI != LE; ++LI) {
709 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
710 // Multiple live-ins can alias the same register.
711 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
712 if (!hasInterval(*AS))
713 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
717 // Skip over empty initial indices.
718 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
719 getInstructionFromIndex(MIIndex) == 0)
720 MIIndex += InstrSlots::NUM;
722 for (; MI != miEnd; ++MI) {
723 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse so def processing sees a stable list.
726 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
727 MachineOperand &MO = MI->getOperand(i);
728 // handle register defs - build intervals
729 if (MO.isReg() && MO.getReg() && MO.isDef()) {
730 handleRegisterDef(MBB, MI, MIIndex, MO, i);
734 // Skip over the empty slots after each instruction.
735 unsigned Slots = MI->getDesc().getNumDefs();
738 MIIndex += InstrSlots::NUM * Slots;
740 // Skip over empty indices.
741 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
742 getInstructionFromIndex(MIIndex) == 0)
743 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs the basic blocks whose start index
// falls within [Start, End), using a binary search over the sorted
// Idx2MBBMap.
748 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
749 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
750 std::vector<IdxMBBPair>::const_iterator I =
751 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
754 while (I != Idx2MBBMap.end()) {
757 MBBs.push_back(I->second);
// findReachableMBBs - Collect into MBBs the blocks starting within
// [Start, End) plus the successors of those blocks, again via binary
// search on the sorted Idx2MBBMap.
764 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
765 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
766 std::vector<IdxMBBPair>::const_iterator I =
767 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
770 while (I != Idx2MBBMap.end()) {
773 MachineBasicBlock *MBB = I->second;
// Stop once a block extends past the requested range.
774 if (getMBBEndIdx(MBB) > End)
776 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
777 SE = MBB->succ_end(); SI != SE; ++SI)
// createInterval - Allocate a new LiveInterval for reg, choosing the
// initial spill weight based on whether reg is physical or virtual.
785 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
786 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
788 return new LiveInterval(reg, Weight);
791 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
792 /// copy field and returns the source register that defines it.
///
/// For EXTRACT_SUBREG the source is operand 1; for INSERT_SUBREG it is
/// operand 2 (the inserted value); otherwise the copy must be a plain
/// target move, whose source is returned via isMoveInstr.
793 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
797 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
798 return VNI->copy->getOperand(1).getReg();
799 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
800 return VNI->copy->getOperand(2).getReg();
801 unsigned SrcReg, DstReg;
802 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
804 assert(0 && "Unrecognized copy instruction!");
808 //===----------------------------------------------------------------------===//
809 // Register allocator hooks.
812 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
813 /// allow one) virtual register operand, then its uses are implicitly using
814 /// the register. Returns the virtual register.
815 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
816 MachineInstr *MI) const {
817 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
819 MachineOperand &MO = MI->getOperand(i);
// Only register uses are of interest; skip defs and non-reg operands.
820 if (!MO.isReg() || !MO.isUse())
822 unsigned Reg = MO.getReg();
// Ignore the register being rematerialized itself and null operands.
823 if (Reg == 0 || Reg == li.reg)
825 // FIXME: For now, only remat MI with at most one register operand.
827 "Can't rematerialize instruction with multiple register operand!");
836 /// isValNoAvailableAt - Return true if the val# of the specified interval
837 /// which reaches the given instruction also reaches the specified use index.
838 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
839 unsigned UseIdx) const {
840 unsigned Index = getInstructionIndex(MI);
// Value number live at MI's index; compare against the one live at UseIdx.
841 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
842 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
843 return UI != li.end() && UI->valno == ValNo;
846 /// isReMaterializable - Returns true if the definition MI of the specified
847 /// val# of the specified interval is re-materializable.
///
/// Trivially true for IMPLICIT_DEF and loads from immutable stack slots.
/// Otherwise the target's remat hooks decide; with -aggressive-remat a
/// conservative target-independent analysis of memory operands and
/// register operands is applied instead. Finally, an implicit register
/// use must be available at every use of the interval and must not be
/// about to be spilled (SpillIs).
848 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
849 const VNInfo *ValNo, MachineInstr *MI,
850 SmallVectorImpl<LiveInterval*> &SpillIs,
855 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
859 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
860 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
861 // FIXME: Let target specific isReallyTriviallyReMaterializable determine
862 // this but remember this is not safe to fold into a two-address
864 // This is a load from fixed stack slot. It can be rematerialized.
867 // If the target-specific rules don't identify an instruction as
868 // being trivially rematerializable, use some target-independent
870 if (!MI->getDesc().isRematerializable() ||
871 !tii_->isTriviallyReMaterializable(MI)) {
872 if (!EnableAggressiveRemat)
875 // If the instruction accesses memory but the memoperands have been lost,
876 // we can't analyze it.
877 const TargetInstrDesc &TID = MI->getDesc();
878 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
881 // Avoid instructions obviously unsafe for remat.
882 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
885 // If the instruction accesses memory and the memory could be non-constant,
886 // assume the instruction is not rematerializable.
887 for (std::list<MachineMemOperand>::const_iterator
888 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
889 const MachineMemOperand &MMO = *I;
890 if (MMO.isVolatile() || MMO.isStore())
892 const Value *V = MMO.getValue();
// Pseudo source values (constant pool, fixed stack, etc.) know whether
// they are constant; everything else is checked via alias analysis.
895 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
896 if (!PSV->isConstant(mf_->getFrameInfo()))
898 } else if (!aa_->pointsToConstantMemory(V))
902 // If any of the registers accessed are non-constant, conservatively assume
903 // the instruction is not rematerializable.
905 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
906 const MachineOperand &MO = MI->getOperand(i);
908 unsigned Reg = MO.getReg();
911 if (TargetRegisterInfo::isPhysicalRegister(Reg))
914 // Only allow one def, and that in the first operand.
915 if (MO.isDef() != (i == 0))
918 // Only allow constant-valued registers.
919 bool IsLiveIn = mri_->isLiveIn(Reg);
920 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
923 // For the def, it should be the only def of that register.
924 if (MO.isDef() && (next(I) != E || IsLiveIn))
928 // Only allow one other register use, as that's all the
929 // remat mechanisms support currently.
933 else if (Reg != ImpUse)
936 // For the use, there should be only one associated def.
937 if (I != E && (next(I) != E || IsLiveIn))
// If the remat candidate implicitly uses another vreg, its value must
// be available at every use point of the interval being rematted.
944 unsigned ImpUse = getReMatImplicitUse(li, MI);
946 const LiveInterval &ImpLi = getInterval(ImpUse);
947 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
948 re = mri_->use_end(); ri != re; ++ri) {
949 MachineInstr *UseMI = &*ri;
950 unsigned UseIdx = getInstructionIndex(UseMI);
951 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
953 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
957 // If a register operand of the re-materialized instruction is going to
958 // be spilled next, then it's not legal to re-materialize this instruction.
959 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
960 if (ImpUse == SpillIs[i]->reg)
966 /// isReMaterializable - Returns true if the definition MI of the specified
967 /// val# of the specified interval is re-materializable.
///
/// Convenience overload: forwards to the full overload with empty
/// placeholder arguments for the spill list and the load flag.
968 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
969 const VNInfo *ValNo, MachineInstr *MI) {
970 SmallVector<LiveInterval*, 4> Dummy1;
972 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
975 /// isReMaterializable - Returns true if every definition of MI of every
976 /// val# of the specified interval is re-materializable.
977 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
978 SmallVectorImpl<LiveInterval*> &SpillIs,
// Check each value number's defining instruction in turn; all must pass.
981 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
983 const VNInfo *VNI = *i;
984 unsigned DefIdx = VNI->def;
986 continue; // Dead val#.
987 // Is the def for the val# rematerializable?
990 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
991 bool DefIsLoad = false;
993 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1000 /// FilterFoldedOps - Filter out two-address use operands. Return
1001 /// true if it finds any issue with the operands that ought to prevent
///
/// Accumulates the mod/ref kind of the fold into MRInfo and appends the
/// surviving operand indices to FoldOps.
1003 static bool FilterFoldedOps(MachineInstr *MI,
1004 SmallVector<unsigned, 2> &Ops,
1006 SmallVector<unsigned, 2> &FoldOps) {
1007 const TargetInstrDesc &TID = MI->getDesc();
1010 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1011 unsigned OpIdx = Ops[i];
1012 MachineOperand &MO = MI->getOperand(OpIdx);
1013 // FIXME: fold subreg use.
1017 MRInfo |= (unsigned)VirtRegMap::isMod;
1019 // Filter out two-address use operand(s).
1020 if (!MO.isImplicit() &&
1021 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// A tied use is both read and written by the instruction.
1022 MRInfo = VirtRegMap::isModRef;
1025 MRInfo |= (unsigned)VirtRegMap::isRef;
1027 FoldOps.push_back(OpIdx);
1033 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1034 /// slot / to reg or any rematerialized load into ith operand of specified
1035 /// MI. If it is successul, MI is updated with the newly created MI and
1037 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1038 VirtRegMap &vrm, MachineInstr *DefMI,
1040 SmallVector<unsigned, 2> &Ops,
1041 bool isSS, int Slot, unsigned Reg) {
1042 // If it is an implicit def instruction, just delete it.
1043 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1044 RemoveMachineInstrFromMaps(MI);
1045 vrm.RemoveMachineInstrFromMaps(MI);
1046 MI->eraseFromParent();
1051 // Filter the list of operand indexes that are to be folded. Abort if
1052 // any operand will prevent folding.
1053 unsigned MRInfo = 0;
1054 SmallVector<unsigned, 2> FoldOps;
1055 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1058 // The only time it's safe to fold into a two address instruction is when
1059 // it's folding reload and spill from / into a spill stack slot.
1060 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (isSS) or the defining
// instruction (a rematerialized load) into MI. fmi is null on failure.
1063 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1064 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1066 // Remember this instruction uses the spill slot.
1067 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1069 // Attempt to fold the memory reference into the instruction. If
1070 // we can do this, we don't need to insert spill code.
1071 MachineBasicBlock &MBB = *MI->getParent();
1072 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1073 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all spill/restore/emergency-spill bookkeeping from the old
// instruction to the folded one.
1074 vrm.transferSpillPts(MI, fmi);
1075 vrm.transferRestorePts(MI, fmi);
1076 vrm.transferEmergencySpills(MI, fmi);
// Keep the index<->instruction maps in sync: the folded instruction
// takes over the original instruction's index and position in the block.
1078 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1079 mi2iMap_[fmi] = InstrIdx;
1080 MI = MBB.insert(MBB.erase(MI), fmi);
1087 /// canFoldMemoryOperand - Returns true if the specified load / store
1088 /// folding is possible.
1089 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1090 SmallVector<unsigned, 2> &Ops,
1092 // Filter the list of operand indexes that are to be folded. Abort if
1093 // any operand will prevent folding.
1094 unsigned MRInfo = 0;
1095 SmallVector<unsigned, 2> FoldOps;
1096 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1099 // It's only legal to remat for a use, not a def.
// When ReMat is requested, any modified operand rules folding out.
1100 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
1103 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Returns true when every live range of the interval lies inside a single
// basic block; used to decide whether interval splitting is worthwhile.
1106 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1107 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1108 for (LiveInterval::Ranges::const_iterator
1109 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Binary-search Idx2MBBMap (sorted by index) for the block containing
// the start of this range.
1110 std::vector<IdxMBBPair>::const_iterator II =
1111 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1112 if (II == Idx2MBBMap.end())
1114 if (I->end > II->first) // crossing a MBB.
1116 MBBs.insert(II->second);
// A second distinct block means the interval spans multiple MBBs.
1117 if (MBBs.size() > 1)
1123 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1124 /// interval on to-be re-materialized operands of MI) with new register.
1125 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1126 MachineInstr *MI, unsigned NewVReg,
1128 // There is an implicit use. That means one of the other operand is
1129 // being remat'ed and the remat'ed instruction has li.reg as an
1130 // use operand. Make sure we rewrite that as well.
1131 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1132 MachineOperand &MO = MI->getOperand(i);
1135 unsigned Reg = MO.getReg();
// Only rematerialized *virtual* registers are of interest here.
1136 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1138 if (!vrm.isReMaterialized(Reg))
// Redirect the remat instruction's use of li.reg to the new vreg.
1140 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1141 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1143 UseMO->setReg(NewVReg);
1147 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1148 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites the operands of a single instruction MI that reference li.reg
// so they use a fresh (or reused) virtual register NewVReg, attempting to
// fold loads/stores where legal and creating/extending the small spill
// interval for NewVReg. HasDef/HasUse report what kinds of operands were
// rewritten.
1149 bool LiveIntervals::
1150 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1151 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1152 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1153 unsigned Slot, int LdSlot,
1154 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1156 const TargetRegisterClass* rc,
1157 SmallVector<int, 4> &ReMatIds,
1158 const MachineLoopInfo *loopInfo,
1159 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1160 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1161 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1162 MachineBasicBlock *MBB = MI->getParent();
1163 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1164 bool CanFold = false;
// Main operand scan; folding can mutate MI, in which case control jumps
// back here via the RestartInstruction label (see the goto below).
1166 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1167 MachineOperand& mop = MI->getOperand(i);
1170 unsigned Reg = mop.getReg();
1171 unsigned RegI = Reg;
1172 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1177 bool TryFold = !DefIsReMat;
1178 bool FoldSS = true; // Default behavior unless it's a remat.
1179 int FoldSlot = Slot;
1181 // If this is the rematerializable definition MI itself and
1182 // all of its uses are rematerialized, simply delete it.
1183 if (MI == ReMatOrigDefMI && CanDelete) {
1184 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1186 RemoveMachineInstrFromMaps(MI);
1187 vrm.RemoveMachineInstrFromMaps(MI);
1188 MI->eraseFromParent();
1192 // If def for this use can't be rematerialized, then try folding.
1193 // If def is rematerializable and it's a load, also try folding.
1194 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1196 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1202 // Scan all of the operands of this instruction rewriting operands
1203 // to use NewVReg instead of li.reg as appropriate. We do this for
1206 // 1. If the instr reads the same spilled vreg multiple times, we
1207 // want to reuse the NewVReg.
1208 // 2. If the instr is a two-addr instruction, we are required to
1209 // keep the src/dst regs pinned.
1211 // Keep track of whether we replace a use and/or def so that we can
1212 // create the spill interval with the appropriate range.
1214 HasUse = mop.isUse();
1215 HasDef = mop.isDef();
1216 SmallVector<unsigned, 2> Ops;
// Look ahead for further operands of the same register on this
// instruction so they can all be rewritten together.
1218 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1219 const MachineOperand &MOj = MI->getOperand(j);
1222 unsigned RegJ = MOj.getReg();
1223 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1227 HasUse |= MOj.isUse();
1228 HasDef |= MOj.isDef();
1232 if (HasUse && !li.liveAt(getUseIndex(index)))
1233 // Must be defined by an implicit def. It should not be spilled. Note,
1234 // this is for correctness reason. e.g.
1235 // 8 %reg1024<def> = IMPLICIT_DEF
1236 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1237 // The live range [12, 14) are not part of the r1024 live interval since
1238 // it's defined by an implicit def. It will not conflicts with live
1239 // interval of r1025. Now suppose both registers are spilled, you can
1240 // easily see a situation where both registers are reloaded before
1241 // the INSERT_SUBREG and both target registers that would overlap.
1244 // Update stack slot spill weight if we are splitting.
1245 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1249 // Create a new virtual register for the spill interval.
1250 // Create the new register now so we can map the fold instruction
1251 // to the new register so when it is unfolded we get the correct
1253 bool CreatedNewVReg = false;
1255 NewVReg = mri_->createVirtualRegister(rc);
1257 CreatedNewVReg = true;
1263 // Do not fold load / store here if we are splitting. We'll find an
1264 // optimal point to insert a load / store later.
1266 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1267 Ops, FoldSS, FoldSlot, NewVReg)) {
1268 // Folding the load/store can completely change the instruction in
1269 // unpredictable ways, rescan it from the beginning.
1272 // We need to give the new vreg the same stack slot as the
1273 // spilled interval.
1274 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// Folding may have deleted MI outright; otherwise restart the
// operand scan on the (possibly replaced) instruction.
1280 if (isRemoved(MI)) {
1284 goto RestartInstruction;
1287 // We'll try to fold it later if it's profitable.
1288 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, plus all the other operands of the same
// register collected in Ops, to the new vreg.
1292 mop.setReg(NewVReg);
1293 if (mop.isImplicit())
1294 rewriteImplicitOps(li, MI, NewVReg, vrm);
1296 // Reuse NewVReg for other reads.
1297 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1298 MachineOperand &mopj = MI->getOperand(Ops[j]);
1299 mopj.setReg(NewVReg);
1300 if (mopj.isImplicit())
1301 rewriteImplicitOps(li, MI, NewVReg, vrm);
1304 if (CreatedNewVReg) {
1306 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1307 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1308 // Each valnum may have its own remat id.
1309 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1311 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1313 if (!CanDelete || (HasUse && HasDef)) {
1314 // If this is a two-addr instruction then its use operands are
1315 // rematerializable but its def is not. It should be assigned a
1317 vrm.assignVirt2StackSlot(NewVReg, Slot);
1320 vrm.assignVirt2StackSlot(NewVReg, Slot);
1322 } else if (HasUse && HasDef &&
1323 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1324 // If this interval hasn't been assigned a stack slot (because earlier
1325 // def is a deleted remat def), do it now.
1326 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1327 vrm.assignVirt2StackSlot(NewVReg, Slot);
1330 // Re-matting an instruction with virtual register use. Add the
1331 // register as an implicit use on the use MI.
1332 if (DefIsReMat && ImpUse)
1333 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1335 // create a new register interval for this spill / remat.
1336 LiveInterval &nI = getOrCreateInterval(NewVReg);
1337 if (CreatedNewVReg) {
1338 NewLIs.push_back(&nI);
1339 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1341 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add the new interval's live ranges: for a use, [load, use+1); for a
// def, [def, store) — covering the reload/spill slots around MI.
1345 if (CreatedNewVReg) {
1346 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1347 nI.getNextValue(~0U, 0, VNInfoAllocator));
1351 // Extend the split live interval to this def / use.
1352 unsigned End = getUseIndex(index)+1;
1353 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1354 nI.getValNumInfo(nI.getNumValNums()-1));
1360 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1361 nI.getNextValue(~0U, 0, VNInfoAllocator));
1366 DOUT << "\t\t\t\tAdded new interval: ";
1367 nI.print(DOUT, tri_);
// Returns true if the given val# has a kill strictly between Idx and the
// end of basic block MBB.
1372 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1374 MachineBasicBlock *MBB, unsigned Idx) const {
1375 unsigned End = getMBBEndIdx(MBB);
// Scan the val#'s recorded kill indices for one inside (Idx, End).
1376 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1377 unsigned KillIdx = VNI->kills[j];
1378 if (KillIdx > Idx && KillIdx < End)
1384 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1385 /// during spilling.
1387 struct RewriteInfo {
// Records one def/use occurrence: the instruction's index, the
// instruction itself, and whether this occurrence reads and/or writes
// the register.
1392 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1393 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering on RewriteInfo by ascending instruction index;
// used with std::sort in rewriteInstructionsForSpills.
1396 struct RewriteInfoCompare {
1397 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1398 return LHS.Index < RHS.Index;
// Rewrites every def/use of li.reg inside one live range [I->start, I->end):
// collects all occurrences, sorts them by instruction index, rewrites each
// via rewriteInstructionForSpills, and records per-MBB spill (last def) and
// restore (first use) points in SpillIdxes / RestoreIdxes for the caller.
1403 void LiveIntervals::
1404 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1405 LiveInterval::Ranges::const_iterator &I,
1406 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1407 unsigned Slot, int LdSlot,
1408 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1410 const TargetRegisterClass* rc,
1411 SmallVector<int, 4> &ReMatIds,
1412 const MachineLoopInfo *loopInfo,
1413 BitVector &SpillMBBs,
1414 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1415 BitVector &RestoreMBBs,
1416 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1417 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1418 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1419 bool AllCanFold = true;
1420 unsigned NewVReg = 0;
1421 unsigned start = getBaseIndex(I->start);
1422 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1424 // First collect all the def / use in this live range that will be rewritten.
1425 // Make sure they are sorted according to instruction index.
1426 std::vector<RewriteInfo> RewriteMIs;
1427 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1428 re = mri_->reg_end(); ri != re; ) {
1429 MachineInstr *MI = &*ri;
1430 MachineOperand &O = ri.getOperand();
1432 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1433 unsigned index = getInstructionIndex(MI);
// Skip occurrences outside the live range being rewritten.
1434 if (index < start || index >= end)
1436 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1437 // Must be defined by an implicit def. It should not be spilled. Note,
1438 // this is for correctness reason. e.g.
1439 // 8 %reg1024<def> = IMPLICIT_DEF
1440 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1441 // The live range [12, 14) are not part of the r1024 live interval since
1442 // it's defined by an implicit def. It will not conflicts with live
1443 // interval of r1025. Now suppose both registers are spilled, you can
1444 // easily see a situation where both registers are reloaded before
1445 // the INSERT_SUBREG and both target registers that would overlap.
1447 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1449 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1451 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1452 // Now rewrite the defs and uses.
1453 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1454 RewriteInfo &rwi = RewriteMIs[i];
1456 unsigned index = rwi.Index;
1457 bool MIHasUse = rwi.HasUse;
1458 bool MIHasDef = rwi.HasDef;
1459 MachineInstr *MI = rwi.MI;
1460 // If MI def and/or use the same register multiple times, then there
1461 // are multiple entries.
// Collapse consecutive entries that belong to the same instruction,
// merging their use/def flags and counting the uses.
1462 unsigned NumUses = MIHasUse;
1463 while (i != e && RewriteMIs[i].MI == MI) {
1464 assert(RewriteMIs[i].Index == index);
1465 bool isUse = RewriteMIs[i].HasUse;
1466 if (isUse) ++NumUses;
1468 MIHasDef |= RewriteMIs[i].HasDef;
1471 MachineBasicBlock *MBB = MI->getParent();
1473 if (ImpUse && MI != ReMatDefMI) {
1474 // Re-matting an instruction with virtual register use. Update the
1475 // register interval's spill weight to HUGE_VALF to prevent it from
1477 LiveInterval &ImpLi = getInterval(ImpUse);
1478 ImpLi.weight = HUGE_VALF;
// When splitting, try to reuse the vreg already created for this MBB
// so all rewrites in one block share a single new interval.
1481 unsigned MBBId = MBB->getNumber();
1482 unsigned ThisVReg = 0;
1484 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1485 if (NVI != MBBVRegsMap.end()) {
1486 ThisVReg = NVI->second;
1493 // It's better to start a new interval to avoid artifically
1494 // extend the new interval.
1495 if (MIHasDef && !MIHasUse) {
1496 MBBVRegsMap.erase(MBB->getNumber());
1502 bool IsNew = ThisVReg == 0;
1504 // This ends the previous live interval. If all of its def / use
1505 // can be folded, give it a low spill weight.
1506 if (NewVReg && TrySplit && AllCanFold) {
1507 LiveInterval &nI = getOrCreateInterval(NewVReg);
1514 bool HasDef = false;
1515 bool HasUse = false;
1516 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1517 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1518 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1519 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1520 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
// Nothing was rewritten for this instruction (e.g. it was deleted).
1521 if (!HasDef && !HasUse)
1524 AllCanFold &= CanFold;
1526 // Update weight of spill interval.
1527 LiveInterval &nI = getOrCreateInterval(NewVReg);
1529 // The spill weight is now infinity as it cannot be spilled again.
1530 nI.weight = HUGE_VALF;
1534 // Keep track of the last def and first use in each MBB.
1536 if (MI != ReMatOrigDefMI || !CanDelete) {
1537 bool HasKill = false;
1539 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1541 // If this is a two-address code, then this index starts a new VNInfo.
1542 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1544 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or update) this block's pending spill-point entry for
// NewVReg; a later def replaces the earlier one.
1546 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1547 SpillIdxes.find(MBBId);
1549 if (SII == SpillIdxes.end()) {
1550 std::vector<SRInfo> S;
1551 S.push_back(SRInfo(index, NewVReg, true));
1552 SpillIdxes.insert(std::make_pair(MBBId, S));
1553 } else if (SII->second.back().vreg != NewVReg) {
1554 SII->second.push_back(SRInfo(index, NewVReg, true));
1555 } else if ((int)index > SII->second.back().index) {
1556 // If there is an earlier def and this is a two-address
1557 // instruction, then it's not possible to fold the store (which
1558 // would also fold the load).
1559 SRInfo &Info = SII->second.back();
1561 Info.canFold = !HasUse;
1563 SpillMBBs.set(MBBId);
1564 } else if (SII != SpillIdxes.end() &&
1565 SII->second.back().vreg == NewVReg &&
1566 (int)index > SII->second.back().index) {
1567 // There is an earlier def that's not killed (must be two-address).
1568 // The spill is no longer needed.
1569 SII->second.pop_back();
1570 if (SII->second.empty()) {
1571 SpillIdxes.erase(MBBId);
1572 SpillMBBs.reset(MBBId);
// Uses: decide whether the pending spill/restore entries for this
// block remain foldable, and record the first restore point.
1579 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1580 SpillIdxes.find(MBBId);
1581 if (SII != SpillIdxes.end() &&
1582 SII->second.back().vreg == NewVReg &&
1583 (int)index > SII->second.back().index)
1584 // Use(s) following the last def, it's not safe to fold the spill.
1585 SII->second.back().canFold = false;
1586 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1587 RestoreIdxes.find(MBBId);
1588 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1589 // If we are splitting live intervals, only fold if it's the first
1590 // use and there isn't another use later in the MBB.
1591 RII->second.back().canFold = false;
1593 // Only need a reload if there isn't an earlier def / use.
1594 if (RII == RestoreIdxes.end()) {
1595 std::vector<SRInfo> Infos;
1596 Infos.push_back(SRInfo(index, NewVReg, true));
1597 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1599 RII->second.push_back(SRInfo(index, NewVReg, true));
1601 RestoreMBBs.set(MBBId);
1605 // Update spill weight.
1606 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1607 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1610 if (NewVReg && TrySplit && AllCanFold) {
1611 // If all of its def / use can be folded, give it a low spill weight.
1612 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Returns true if block Id has a foldable restore recorded for virtual
// register vr at instruction index 'index'.
1617 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1618 BitVector &RestoreMBBs,
1619 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were recorded for this block at all.
1620 if (!RestoreMBBs[Id])
1622 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1623 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1624 if (Restores[i].index == index &&
1625 Restores[i].vreg == vr &&
1626 Restores[i].canFold)
1631 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1632 BitVector &RestoreMBBs,
1633 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1634 if (!RestoreMBBs[Id])
1636 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1637 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1638 if (Restores[i].index == index && Restores[i].vreg)
1639 Restores[i].index = -1;
1642 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1643 /// spilled and create empty intervals for their uses.
1645 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1646 const TargetRegisterClass* rc,
1647 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining occurrence of li.reg. Defs left at this point must
// be IMPLICIT_DEFs (everything else was rewritten) and are erased; uses
// get a fresh, empty implicitly-defined interval.
1648 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1649 re = mri_->reg_end(); ri != re; ) {
1650 MachineOperand &O = ri.getOperand();
1651 MachineInstr *MI = &*ri;
1654 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1655 "Register def was not rewritten?");
1656 RemoveMachineInstrFromMaps(MI);
1657 vrm.RemoveMachineInstrFromMaps(MI);
1658 MI->eraseFromParent();
1660 // This must be an use of an implicit_def so it's not part of the live
1661 // interval. Create a new empty live interval for it.
1662 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1663 unsigned NewVReg = mri_->createVirtualRegister(rc);
1665 vrm.setIsImplicitlyDefined(NewVReg);
1666 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Point every operand of this instruction that referenced li.reg at
// the new vreg.
1667 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1668 MachineOperand &MO = MI->getOperand(i);
1669 if (MO.isReg() && MO.getReg() == li.reg)
// Strict-weak ordering for LiveInterval pointers by ascending start index;
// used with std::sort so callers receive new intervals in sorted order.
1678 bool operator()(LiveInterval* A, LiveInterval* B) {
1679 return A->beginNumber() < B->beginNumber();
// Simple spiller path (-fast-spill): assign one stack slot, then for each
// instruction touching li.reg either fold the memory access in or rewrite
// it to a fresh short-lived vreg with reload-before / spill-after points.
// Returns the newly created intervals, sorted by start index.
1684 std::vector<LiveInterval*> LiveIntervals::
1685 addIntervalsForSpillsFast(const LiveInterval &li,
1686 const MachineLoopInfo *loopInfo,
1687 VirtRegMap &vrm, float& SSWeight) {
1688 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1690 std::vector<LiveInterval*> added;
1692 assert(li.weight != HUGE_VALF &&
1693 "attempt to spill already spilled interval!");
1695 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1699 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1703 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1704 while (RI != mri_->reg_end()) {
1705 MachineInstr* MI = &*RI;
// Gather all operand indices on MI that reference li.reg, noting
// whether the instruction reads and/or writes the register.
1707 SmallVector<unsigned, 2> Indices;
1708 bool HasUse = false;
1709 bool HasDef = false;
1711 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1712 MachineOperand& mop = MI->getOperand(i);
1713 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1715 HasUse |= MI->getOperand(i).isUse();
1716 HasDef |= MI->getOperand(i).isDef();
1718 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; only fall back to an
// explicit reload/spill around the instruction if folding fails.
1721 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1722 Indices, true, slot, li.reg)) {
1723 unsigned NewVReg = mri_->createVirtualRegister(rc);
1725 vrm.assignVirt2StackSlot(NewVReg, slot);
1727 // create a new register for this spill
1728 LiveInterval &nI = getOrCreateInterval(NewVReg);
1730 // the spill weight is now infinity as it
1731 // cannot be spilled again
1732 nI.weight = HUGE_VALF;
1734 // Rewrite register operands to use the new vreg.
1735 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1736 E = Indices.end(); I != E; ++I) {
1737 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction, so uses are kills.
1739 if (MI->getOperand(*I).isUse())
1740 MI->getOperand(*I).setIsKill(true);
1743 // Fill in the new live interval.
1744 unsigned index = getInstructionIndex(MI);
// For a use: live from the reload slot up to the use; request a
// restore before MI.
1746 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1747 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1750 vrm.addRestorePoint(NewVReg, MI);
// For a def: live from the def to the spill-store slot; request a
// spill after MI.
1753 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1754 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1757 vrm.addSpillPoint(NewVReg, true, MI);
1760 added.push_back(&nI);
1762 DOUT << "\t\t\t\tadded new interval: ";
// Accumulate the stack-slot weight for the access pattern just handled.
1766 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1769 SSWeight += getSpillWeight(true, true, loopDepth);
1771 SSWeight += getSpillWeight(false, true, loopDepth);
1773 SSWeight += getSpillWeight(true, false, loopDepth);
// Rewriting/erasing invalidates the reg_iterator; restart the scan.
1777 RI = mri_->reg_begin(li.reg);
1780 // Clients expect the new intervals to be returned in sorted order.
1781 std::sort(added.begin(), added.end(), LISorter());
1786 std::vector<LiveInterval*> LiveIntervals::
1787 addIntervalsForSpills(const LiveInterval &li,
1788 SmallVectorImpl<LiveInterval*> &SpillIs,
1789 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1792 if (EnableFastSpilling)
1793 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1795 assert(li.weight != HUGE_VALF &&
1796 "attempt to spill already spilled interval!");
1798 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1799 li.print(DOUT, tri_);
1802 // Spill slot weight.
1805 // Each bit specify whether a spill is required in the MBB.
1806 BitVector SpillMBBs(mf_->getNumBlockIDs());
1807 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1808 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1809 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1810 DenseMap<unsigned,unsigned> MBBVRegsMap;
1811 std::vector<LiveInterval*> NewLIs;
1812 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1814 unsigned NumValNums = li.getNumValNums();
1815 SmallVector<MachineInstr*, 4> ReMatDefs;
1816 ReMatDefs.resize(NumValNums, NULL);
1817 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1818 ReMatOrigDefs.resize(NumValNums, NULL);
1819 SmallVector<int, 4> ReMatIds;
1820 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1821 BitVector ReMatDelete(NumValNums);
1822 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1824 // Spilling a split live interval. It cannot be split any further. Also,
1825 // it's also guaranteed to be a single val# / range interval.
1826 if (vrm.getPreSplitReg(li.reg)) {
1827 vrm.setIsSplitFromReg(li.reg, 0);
1828 // Unset the split kill marker on the last use.
1829 unsigned KillIdx = vrm.getKillPoint(li.reg);
1831 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1832 assert(KillMI && "Last use disappeared?");
1833 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1834 assert(KillOp != -1 && "Last use disappeared?");
1835 KillMI->getOperand(KillOp).setIsKill(false);
1837 vrm.removeKillPoint(li.reg);
1838 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1839 Slot = vrm.getStackSlot(li.reg);
1840 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1841 MachineInstr *ReMatDefMI = DefIsReMat ?
1842 vrm.getReMaterializedMI(li.reg) : NULL;
1844 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1845 bool isLoad = isLoadSS ||
1846 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1847 bool IsFirstRange = true;
1848 for (LiveInterval::Ranges::const_iterator
1849 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1850 // If this is a split live interval with multiple ranges, it means there
1851 // are two-address instructions that re-defined the value. Only the
1852 // first def can be rematerialized!
1854 // Note ReMatOrigDefMI has already been deleted.
1855 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1856 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1857 false, vrm, rc, ReMatIds, loopInfo,
1858 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1859 MBBVRegsMap, NewLIs, SSWeight);
1861 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1862 Slot, 0, false, false, false,
1863 false, vrm, rc, ReMatIds, loopInfo,
1864 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1865 MBBVRegsMap, NewLIs, SSWeight);
1867 IsFirstRange = false;
1870 SSWeight = 0.0f; // Already accounted for when split.
1871 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1875 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1876 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1880 bool NeedStackSlot = false;
1881 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1883 const VNInfo *VNI = *i;
1884 unsigned VN = VNI->id;
1885 unsigned DefIdx = VNI->def;
1887 continue; // Dead val#.
1888 // Is the def for the val# rematerializable?
1889 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1890 ? 0 : getInstructionFromIndex(DefIdx);
1892 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1893 // Remember how to remat the def of this val#.
1894 ReMatOrigDefs[VN] = ReMatDefMI;
1895 // Original def may be modified so we have to make a copy here.
1896 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1897 ClonedMIs.push_back(Clone);
1898 ReMatDefs[VN] = Clone;
1900 bool CanDelete = true;
1901 if (VNI->hasPHIKill) {
1902 // A kill is a phi node, not all of its uses can be rematerialized.
1903 // It must not be deleted.
1905 // Need a stack slot if there is any live range where uses cannot be
1907 NeedStackSlot = true;
1910 ReMatDelete.set(VN);
1912 // Need a stack slot if there is any live range where uses cannot be
1914 NeedStackSlot = true;
1918 // One stack slot per live interval.
1919 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1920 Slot = vrm.assignVirt2StackSlot(li.reg);
1922 // Create new intervals and rewrite defs and uses.
1923 for (LiveInterval::Ranges::const_iterator
1924 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1925 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1926 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1927 bool DefIsReMat = ReMatDefMI != NULL;
1928 bool CanDelete = ReMatDelete[I->valno->id];
1930 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1931 bool isLoad = isLoadSS ||
1932 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1933 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1934 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1935 CanDelete, vrm, rc, ReMatIds, loopInfo,
1936 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1937 MBBVRegsMap, NewLIs, SSWeight);
1940 // Insert spills / restores if we are splitting.
1942 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1946 SmallPtrSet<LiveInterval*, 4> AddedKill;
1947 SmallVector<unsigned, 2> Ops;
1948 if (NeedStackSlot) {
1949 int Id = SpillMBBs.find_first();
1951 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1952 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1953 std::vector<SRInfo> &spills = SpillIdxes[Id];
1954 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1955 int index = spills[i].index;
1956 unsigned VReg = spills[i].vreg;
1957 LiveInterval &nI = getOrCreateInterval(VReg);
1958 bool isReMat = vrm.isReMaterialized(VReg);
1959 MachineInstr *MI = getInstructionFromIndex(index);
1960 bool CanFold = false;
1961 bool FoundUse = false;
1963 if (spills[i].canFold) {
1965 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1966 MachineOperand &MO = MI->getOperand(j);
1967 if (!MO.isReg() || MO.getReg() != VReg)
1974 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1975 RestoreMBBs, RestoreIdxes))) {
1976 // MI has two-address uses of the same register. If the use
1977 // isn't the first and only use in the BB, then we can't fold
1978 // it. FIXME: Move this to rewriteInstructionsForSpills.
1985 // Fold the store into the def if possible.
1986 bool Folded = false;
1987 if (CanFold && !Ops.empty()) {
1988 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1991 // Also folded uses, do not issue a load.
1992 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1993 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1995 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1999 // Otherwise tell the spiller to issue a spill.
2001 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2002 bool isKill = LR->end == getStoreIndex(index);
2003 if (!MI->registerDefIsDead(nI.reg))
2004 // No need to spill a dead def.
2005 vrm.addSpillPoint(VReg, isKill, MI);
2007 AddedKill.insert(&nI);
2010 // Update spill slot weight.
2012 SSWeight += getSpillWeight(true, false, loopDepth);
2014 Id = SpillMBBs.find_next(Id);
2018 int Id = RestoreMBBs.find_first();
2020 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2021 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2023 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2024 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2025 int index = restores[i].index;
2028 unsigned VReg = restores[i].vreg;
2029 LiveInterval &nI = getOrCreateInterval(VReg);
2030 bool isReMat = vrm.isReMaterialized(VReg);
2031 MachineInstr *MI = getInstructionFromIndex(index);
2032 bool CanFold = false;
2034 if (restores[i].canFold) {
2036 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2037 MachineOperand &MO = MI->getOperand(j);
2038 if (!MO.isReg() || MO.getReg() != VReg)
2042 // If this restore were to be folded, it would have been folded
2051 // Fold the load into the use if possible.
2052 bool Folded = false;
2053 if (CanFold && !Ops.empty()) {
2055 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2057 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2059 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2060 // If the rematerializable def is a load, also try to fold it.
2061 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2062 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2063 Ops, isLoadSS, LdSlot, VReg);
2065 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2067 // Re-matting an instruction with virtual register use. Add the
2068 // register as an implicit use on the use MI and update the register
2069 // interval's spill weight to HUGE_VALF to prevent it from being
2071 LiveInterval &ImpLi = getInterval(ImpUse);
2072 ImpLi.weight = HUGE_VALF;
2073 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2078 // If folding is not possible / failed, then tell the spiller to issue a
2079 // load / rematerialization for us.
2081 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2083 vrm.addRestorePoint(VReg, MI);
2085 // Update spill slot weight.
2087 SSWeight += getSpillWeight(false, true, loopDepth);
2089 Id = RestoreMBBs.find_next(Id);
2092 // Finalize intervals: add kills, finalize spill weights, and filter out
2094 std::vector<LiveInterval*> RetNewLIs;
2095 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2096 LiveInterval *LI = NewLIs[i];
2098 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2099 if (!AddedKill.count(LI)) {
2100 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2101 unsigned LastUseIdx = getBaseIndex(LR->end);
2102 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2103 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2104 assert(UseIdx != -1);
2105 if (LastUse->getOperand(UseIdx).isImplicit() ||
2106 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2107 LastUse->getOperand(UseIdx).setIsKill();
2108 vrm.addKillPoint(LI->reg, LastUseIdx);
2111 RetNewLIs.push_back(LI);
2115 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2119 /// hasAllocatableSuperReg - Return true if the specified physical register has
2120 /// any super register that's allocatable.
2121 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's super-register list for Reg. A super register qualifies
// only if it is both marked allocatable AND has a live interval computed for
// it; either condition alone is not enough.
2122 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2123 if (allocatableRegs_[*AS] && hasInterval(*AS))
2128 /// getRepresentativeReg - Find the largest super register of the specified
2129 /// physical register.
2130 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2131 // Find the largest super-register that is allocatable.
// Start with Reg itself as the best candidate; it is returned unchanged if no
// suitable super register is found.
2132 unsigned BestReg = Reg;
2133 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2134 unsigned SuperReg = *AS;
// A super register with an interval but no allocatable super register of its
// own is maximal — nothing above it can be chosen instead.
2135 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2143 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2144 /// specified interval that conflicts with the specified physical register.
2145 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2146 unsigned PhysReg) const {
2147 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable)
// super register's interval, not PhysReg's own interval directly.
2148 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (both defs and uses) that references li's register.
2149 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2150 E = mri_->reg_end(); I != E; ++I) {
2151 MachineOperand &O = I.getOperand();
2152 MachineInstr *MI = O.getParent();
2153 unsigned Index = getInstructionIndex(MI);
// An operand conflicts when the physical register's interval is live at the
// instruction's index.
2154 if (pli.liveAt(Index))
2157 return NumConflicts;
2160 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2161 /// around all defs and uses of the specified interval.
2162 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2163 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) super register so the whole alias group
// is covered by one spill slot.
2164 unsigned SpillReg = getRepresentativeReg(PhysReg);
2166 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2167 // If there are registers which alias PhysReg, but which are not a
2168 // sub-register of the chosen representative super register. Assert
2169 // since we can't handle it yet.
2170 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2171 tri_->isSuperRegister(*AS, SpillReg));
2173 LiveInterval &pli = getInterval(SpillReg);
// Each instruction may reference li.reg through several operands; SeenMIs
// ensures we only process (and record an emergency spill for) it once.
2174 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2175 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2176 E = mri_->reg_end(); I != E; ++I) {
2177 MachineOperand &O = I.getOperand();
2178 MachineInstr *MI = O.getParent();
2179 if (SeenMIs.count(MI))
2182 unsigned Index = getInstructionIndex(MI);
2183 if (pli.liveAt(Index)) {
// The physical register is live across this def/use of li: record an
// emergency spill point and carve the instruction's slot range out of the
// physical register's interval.
2184 vrm.addEmergencySpill(SpillReg, MI);
2185 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// Also trim any sub-register intervals that are live at the same index, so
// the whole register group is consistently freed around the instruction.
2186 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2187 if (!hasInterval(*AS))
2189 LiveInterval &spli = getInterval(*AS);
2190 if (spli.liveAt(Index))
2191 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// addLiveRangeToEndOfBlock - Extend reg's live interval from startInst's def
// slot to the end of startInst's basic block, creating the interval (and a new
// value number) as needed.
2197 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2198 MachineInstr* startInst) {
2199 LiveInterval& Interval = getOrCreateInterval(reg);
// New value number defined at startInst's DEF slot.
2200 VNInfo* VN = Interval.getNextValue(
2201 getInstructionIndex(startInst) + InstrSlots::DEF,
2202 startInst, getVNInfoAllocator());
// The value is killed by a PHI at the block boundary; record the block-end
// index as its kill point.
2203 VN->hasPHIKill = true;
2204 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
// Range runs from the def slot to one past the block-end index.
2205 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2206 getMBBEndIdx(startInst->getParent()) + 1, VN);
2207 Interval.addRange(LR);