1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
// File-scope state for the LiveIntervals pass: hidden debugging flags,
// -stats counters, and the pass registration object.
39 // Hidden options for help debugging.
// When set, rematerialization of defs at their use points is disabled.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// Controls whether spill intervals are split at basic-block boundaries
// (on by default), and an optional cap on the number of splits (-1 = no cap).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// Statistics reported when -stats is passed.
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration under the name "liveintervals".
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare this pass's dependencies. LiveVariables is both
// required (intervals are derived from its kill/alive information) and
// preserved. PHI elimination and two-address lowering must run first because
// intervals are computed on post-SSA machine code.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
64 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state between runs. (Some statements,
// e.g. clearing the index maps and deleting each cloned instruction in the
// loop below, are not visible in this excerpt.)
67 void LiveIntervals::releaseMemory() {
72 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
73 VNInfoAllocator.Reset();
74 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
// computeNumbering - Assign a fresh numbering to every MachineInstr and
// MachineBasicBlock (instructions are spaced InstrSlots::NUM apart), then
// remap any pre-existing live ranges and value numbers from the old
// numbering (saved in OldI2MI) onto the new one.
80 void LiveIntervals::computeNumbering() {
81 Index2MiMap OldI2MI = i2miMap_;
88 // Number MachineInstrs and MachineBasicBlocks.
89 // Initialize MBB indexes to a sentinel.
90 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
93 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
95 unsigned StartIdx = MIIndex;
97 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
// Record both directions of the instruction <-> index mapping; each
// instruction must map to exactly one index.
99 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
100 assert(inserted && "multiple MachineInstr -> index mappings");
101 i2miMap_.push_back(I);
102 MIIndex += InstrSlots::NUM;
105 // Set the MBB2IdxMap entry for this MBB.
106 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
107 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
108 : std::make_pair(StartIdx, MIIndex - 1);
109 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
111 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Remap existing ranges: split each old index into (instruction, slot
// offset), translate the instruction through OldI2MI/mi2iMap_, and re-add
// the offset.
113 if (!OldI2MI.empty())
114 for (iterator I = begin(), E = end(); I != E; ++I)
115 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
117 unsigned offset = LI->start % InstrSlots::NUM;
118 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
120 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
121 // FIXME: Not correct when the instruction at LI->end has
123 offset = LI->end % InstrSlots::NUM;
124 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
// Range extended past the last instruction: clamp to the new end index.
126 LI->end = i2miMap_.size() * InstrSlots::NUM;
// Remap the value number's def slot and every recorded kill slot too.
129 VNInfo* vni = LI->valno;
130 offset = vni->def % InstrSlots::NUM;
131 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
133 for (size_t i = 0; i < vni->kills.size(); ++i) {
134 offset = vni->kills[i] % InstrSlots::NUM;
135 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
141 /// runOnMachineFunction - Register allocate the whole function
// Cache target/function handles, then compute the numbering and intervals
// (the mf_ assignment and the computeNumbering()/computeIntervals() calls
// fall in lines not visible in this excerpt). Returns true as the pass
// modifies kill markers.
143 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
145 mri_ = &mf_->getRegInfo();
146 tm_ = &fn.getTarget();
147 tri_ = tm_->getRegisterInfo();
148 tii_ = tm_->getInstrInfo();
149 lv_ = &getAnalysis<LiveVariables>();
150 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Count intervals before and after computation for -stats.
155 numIntervals += getNumIntervals();
157 DOUT << "********** INTERVALS **********\n";
158 for (iterator I = begin(), E = end(); I != E; ++I) {
159 I->second.print(DOUT, tri_);
163 numIntervalsAfter += getNumIntervals();
168 /// print - Implement the dump method.
// Dumps every live interval followed by all machine instructions with their
// assigned indices.
169 void LiveIntervals::print(std::ostream &O, const Module* ) const {
170 O << "********** INTERVALS **********\n";
171 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): this prints to DOUT rather than the stream parameter O —
// looks like it should be I->second.print(O, tri_); confirm upstream.
172 I->second.print(DOUT, tri_);
176 O << "********** MACHINEINSTRS **********\n";
177 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
178 mbbi != mbbe; ++mbbi) {
179 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
180 for (MachineBasicBlock::iterator mii = mbbi->begin(),
181 mie = mbbi->end(); mii != mie; ++mii) {
182 O << getInstructionIndex(mii) << '\t' << *mii;
187 /// conflictsWithPhysRegDef - Returns true if the specified register
188 /// is defined during the duration of the specified interval.
// Walks every instruction index covered by li's ranges and inspects its
// register operands (after mapping virtual registers through the VirtRegMap)
// for overlap with 'reg'. Copies involving li.reg itself are skipped (the
// continue/return lines fall in gaps of this excerpt).
189 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
190 VirtRegMap &vrm, unsigned reg) {
191 for (LiveInterval::Ranges::const_iterator
192 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
193 for (unsigned index = getBaseIndex(I->start),
194 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
195 index += InstrSlots::NUM) {
196 // skip deleted instructions
197 while (index != end && !getInstructionFromIndex(index))
198 index += InstrSlots::NUM;
199 if (index == end) break;
201 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from li.reg are coalescing candidates, not real conflicts.
202 unsigned SrcReg, DstReg;
203 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
204 if (SrcReg == li.reg || DstReg == li.reg)
206 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
207 MachineOperand& mop = MI->getOperand(i);
208 if (!mop.isRegister())
210 unsigned PhysReg = mop.getReg();
211 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands conflict only through their assigned physical reg.
213 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
214 if (!vrm.hasPhys(PhysReg))
216 PhysReg = vrm.getPhys(PhysReg);
218 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physical register by its target name,
// otherwise print a virtual register as "%reg<N>". (The else keyword falls
// in a line not visible in this excerpt.)
227 void LiveIntervals::printRegName(unsigned reg) const {
228 if (TargetRegisterInfo::isPhysicalRegister(reg))
229 cerr << tri_->getName(reg);
231 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
// register by instruction 'mi'. Handles three cases: the first def of the
// vreg (builds the initial ranges from LiveVariables info), a redef by a
// two-address instruction (splits the existing value into two val#s), and a
// redef created by PHI elimination (def lives only to end of block).
234 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
235 MachineBasicBlock::iterator mi,
237 LiveInterval &interval) {
238 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
239 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
241 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
242 DOUT << "is a implicit_def\n";
246 // Virtual registers may be defined multiple times (due to phi
247 // elimination and 2-addr elimination). Much of what we do only has to be
248 // done once for the vreg. We use an empty interval to detect the first
249 // time we see a vreg.
250 if (interval.empty()) {
251 // Get the Idx of the defining instructions.
252 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (plain move or subreg insert/extract) on the
// value number so coalescing can identify its source later.
254 MachineInstr *CopyMI = NULL;
255 unsigned SrcReg, DstReg;
256 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
257 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
258 tii_->isMoveInstr(*mi, SrcReg, DstReg))
260 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
262 assert(ValNo->id == 0 && "First value in interval is not 0?");
264 // Loop over all of the blocks that the vreg is defined in. There are
265 // two cases we have to handle here. The most common case is a vreg
266 // whose lifetime is contained within a basic block. In this case there
267 // will be a single kill, in MBB, which comes after the definition.
268 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
269 // FIXME: what about dead vars?
271 if (vi.Kills[0] != mi)
272 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
274 killIdx = defIndex+1;
276 // If the kill happens after the definition, we have an intra-block
278 if (killIdx > defIndex) {
279 assert(vi.AliveBlocks.none() &&
280 "Shouldn't be alive across any blocks!");
281 LiveRange LR(defIndex, killIdx, ValNo);
282 interval.addRange(LR);
283 DOUT << " +" << LR << "\n";
284 interval.addKill(ValNo, killIdx);
289 // The other case we handle is when a virtual register lives to the end
290 // of the defining block, potentially live across some blocks, then is
291 // live into some number of blocks, but gets killed. Start by adding a
292 // range that goes from this definition to the end of the defining block.
293 LiveRange NewLR(defIndex,
294 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
296 DOUT << " +" << NewLR;
297 interval.addRange(NewLR);
299 // Iterate over all of the blocks that the variable is completely
300 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval. AliveBlocks is indexed by block number.
302 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
303 if (vi.AliveBlocks[i]) {
304 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
306 LiveRange LR(getMBBStartIdx(i),
307 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
309 interval.addRange(LR);
315 // Finally, this virtual register is live from the start of any killing
316 // block to the 'use' slot of the killing instruction.
317 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
318 MachineInstr *Kill = vi.Kills[i];
319 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
320 LiveRange LR(getMBBStartIdx(Kill->getParent()),
322 interval.addRange(LR);
323 interval.addKill(ValNo, killIdx);
328 // If this is the second time we see a virtual register definition, it
329 // must be due to phi elimination or two addr elimination. If this is
330 // the result of two address elimination, then the vreg is one of the
331 // def-and-use register operand.
332 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
333 // If this is a two-address definition, then we have already processed
334 // the live range. The only problem is that we didn't realize there
335 // are actually two values in the live interval. Because of this we
336 // need to take the LiveRegion that defines this register and split it
// into two separate value numbers.
338 assert(interval.containsOneValue());
339 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
340 unsigned RedefIndex = getDefIndex(MIIdx);
342 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
343 VNInfo *OldValNo = OldLR->valno;
345 // Delete the initial value, which should be short and continuous,
346 // because the 2-addr copy must be in the same MBB as the redef.
347 interval.removeRange(DefIndex, RedefIndex);
349 // Two-address vregs should always only be redefined once. This means
350 // that at this point, there should be exactly one value number in it.
351 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
353 // The new value number (#1) is defined by the instruction we claimed
// defined value #0, inheriting its def slot and copy.
355 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
358 // Value#0 is now defined by the 2-addr instruction.
359 OldValNo->def = RedefIndex;
362 // Add the new live interval which replaces the range for the input copy.
363 LiveRange LR(DefIndex, RedefIndex, ValNo);
364 DOUT << " replace range with " << LR;
365 interval.addRange(LR);
366 interval.addKill(ValNo, RedefIndex);
368 // If this redefinition is dead, we need to add a dummy unit live
369 // range covering the def slot.
370 if (mi->registerDefIsDead(interval.reg, tri_))
371 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
374 interval.print(DOUT, tri_);
377 // Otherwise, this must be because of phi elimination. If this is the
378 // first redefinition of the vreg that we have seen, go back and change
379 // the live range in the PHI block to be a different value number.
380 if (interval.containsOneValue()) {
381 assert(vi.Kills.size() == 1 &&
382 "PHI elimination vreg should have one kill, the PHI itself!");
384 // Remove the old range that we now know has an incorrect number.
385 VNInfo *VNI = interval.getValNumInfo(0);
386 MachineInstr *Killer = vi.Kills[0];
387 unsigned Start = getMBBStartIdx(Killer->getParent());
388 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
389 DOUT << " Removing [" << Start << "," << End << "] from: ";
390 interval.print(DOUT, tri_); DOUT << "\n";
391 interval.removeRange(Start, End);
392 VNI->hasPHIKill = true;
393 DOUT << " RESULT: "; interval.print(DOUT, tri_);
395 // Replace the interval with one of a NEW value number. Note that this
396 // value number isn't actually defined by an instruction, weird huh? :)
397 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
398 DOUT << " replace range with " << LR;
399 interval.addRange(LR);
400 interval.addKill(LR.valno, End);
401 DOUT << " RESULT: "; interval.print(DOUT, tri_);
404 // In the case of PHI elimination, each variable definition is only
405 // live until the end of the block. We've already taken care of the
406 // rest of the live range.
407 unsigned defIndex = getDefIndex(MIIdx);
// As above: record the defining copy on the new value number if this def
// is a move or subreg insert/extract.
410 MachineInstr *CopyMI = NULL;
411 unsigned SrcReg, DstReg;
412 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
413 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
414 tii_->isMoveInstr(*mi, SrcReg, DstReg))
416 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
418 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
419 LiveRange LR(defIndex, killIndex, ValNo);
420 interval.addRange(LR);
421 interval.addKill(ValNo, killIndex);
422 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
// register. Since physregs are not live across blocks here, the range runs
// from the def slot to either the def slot + 1 (dead def), the use slot of
// the first kill, or the def slot of a redefining instruction in this block.
430 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
431 MachineBasicBlock::iterator mi,
433 LiveInterval &interval,
434 MachineInstr *CopyMI) {
435 // A physical register cannot be live across basic block, so its
436 // lifetime must end somewhere in its defining basic block.
437 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
439 unsigned baseIndex = MIIdx;
440 unsigned start = getDefIndex(baseIndex);
441 unsigned end = start;
443 // If it is not used after definition, it is considered dead at
444 // the instruction defining it. Hence its interval is:
445 // [defSlot(def), defSlot(def)+1)
446 if (mi->registerDefIsDead(interval.reg, tri_)) {
448 end = getDefIndex(start) + 1;
452 // If it is not dead on definition, it must be killed by a
453 // subsequent instruction. Hence its interval is:
454 // [defSlot(def), useSlot(kill)+1)
455 while (++mi != MBB->end()) {
456 baseIndex += InstrSlots::NUM;
457 if (mi->killsRegister(interval.reg, tri_)) {
459 end = getUseIndex(baseIndex) + 1;
461 } else if (mi->modifiesRegister(interval.reg, tri_)) {
462 // Another instruction redefines the register before it is ever read.
463 // Then the register is essentially dead at the instruction that defines
464 // it. Hence its interval is:
465 // [defSlot(def), defSlot(def)+1)
467 end = getDefIndex(start) + 1;
472 // The only case we should have a dead physreg here without a killing or
473 // instruction where we know it's dead is if it is live-in to the function
// and never used (reached only when the scan above found no kill/redef).
475 assert(!CopyMI && "physreg was not killed in defining block!");
476 end = getDefIndex(start) + 1; // It's dead.
479 assert(start < end && "did not find end of interval?");
481 // Already exists? Extend old live interval.
482 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
483 VNInfo *ValNo = (OldLR != interval.end())
484 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
485 LiveRange LR(start, end, ValNo);
486 interval.addRange(LR);
487 interval.addKill(LR.valno, end);
488 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def to the virtual or physical
// handler. For an allocatable physreg, also creates ranges for each of its
// sub-registers not explicitly redefined by the same instruction.
491 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
492 MachineBasicBlock::iterator MI,
495 if (TargetRegisterInfo::isVirtualRegister(reg))
496 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
497 else if (allocatableRegs_[reg]) {
// Identify a defining copy (move or subreg insert/extract) to attach to
// the physreg's value number for later coalescing.
498 MachineInstr *CopyMI = NULL;
499 unsigned SrcReg, DstReg;
500 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
501 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
502 tii_->isMoveInstr(*MI, SrcReg, DstReg))
504 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
505 // Def of a register also defines its sub-registers.
506 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
507 // If MI also modifies the sub-register explicitly, avoid processing it
508 // more than once. Do not pass in TRI here so it checks for exact match.
509 if (!MI->modifiesRegister(*AS))
510 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a range for a register that is live into
// MBB, running from the block start to its first kill, its first redef, or
// (if neither is found) through the block. isAlias marks intervals created
// for sub-registers of an explicit live-in.
514 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
516 LiveInterval &interval, bool isAlias) {
517 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
519 // Look for kills, if it reaches a def before it's killed, then it shouldn't
520 // be considered a livein.
521 MachineBasicBlock::iterator mi = MBB->begin();
522 unsigned baseIndex = MIIdx;
523 unsigned start = baseIndex;
524 unsigned end = start;
525 while (mi != MBB->end()) {
526 if (mi->killsRegister(interval.reg, tri_)) {
528 end = getUseIndex(baseIndex) + 1;
530 } else if (mi->modifiesRegister(interval.reg, tri_)) {
531 // Another instruction redefines the register before it is ever read.
532 // Then the register is essentially dead at the instruction that defines
533 // it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
536 end = getDefIndex(start) + 1;
540 baseIndex += InstrSlots::NUM;
545 // Live-in register might not be used at all.
// (Handling for the no-kill/no-redef case; the branch structure falls in
// lines not visible in this excerpt.)
549 end = getDefIndex(MIIdx) + 1;
551 DOUT << " live through";
// Value number ~"defined" at the block start with no defining instruction.
556 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
557 interval.addRange(LR);
558 interval.addKill(LR.valno, end);
559 DOUT << " +" << LR << '\n';
562 /// computeIntervals - computes the live intervals for virtual
563 /// registers. for some ordering of the machine instructions [1,N] a
564 /// live interval is an interval [i, j) where 1 <= i <= j < N for
565 /// which a variable is live
// Walks every block: first handles live-in registers (and their unseen
// sub-registers), then visits each instruction's register defs, advancing
// MIIndex by InstrSlots::NUM per instruction.
566 void LiveIntervals::computeIntervals() {
567 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
568 << "********** Function: "
569 << ((Value*)mf_->getFunction())->getName() << '\n';
570 // Track the index of the current machine instr.
571 unsigned MIIndex = 0;
572 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
574 MachineBasicBlock *MBB = MBBI;
575 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
577 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
579 // Create intervals for live-ins to this BB first.
580 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
581 LE = MBB->livein_end(); LI != LE; ++LI) {
582 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
583 // Multiple live-ins can alias the same register.
584 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
585 if (!hasInterval(*AS))
586 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
590 for (; MI != miEnd; ++MI) {
591 DOUT << MIIndex << "\t" << *MI;
// Visit operands in reverse so implicit defs are seen after explicit ones.
594 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
595 MachineOperand &MO = MI->getOperand(i);
596 // handle register defs - build intervals
597 if (MO.isRegister() && MO.getReg() && MO.isDef())
598 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
601 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside [LR.start, LR.end), using binary search on the sorted
// Idx2MBBMap. (The return statements fall in lines not visible here.)
606 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
607 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
608 std::vector<IdxMBBPair>::const_iterator I =
609 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
612 while (I != Idx2MBBMap.end()) {
613 if (LR.end <= I->first)
615 MBBs.push_back(I->second);
// createInterval - Construct a new LiveInterval for 'reg'. Physical
// registers get a different initial spill weight than virtual registers
// (the weight expression continues on a line not visible in this excerpt).
623 LiveInterval LiveIntervals::createInterval(unsigned reg) {
624 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
626 return LiveInterval(reg, Weight);
629 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
630 /// copy field and returns the source register that defines it.
631 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG reads its source from operand 1; INSERT_SUBREG reads the
// inserted value from operand 2; otherwise fall back to the target's move
// recognizer (its return falls in a line not visible here).
635 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
636 return VNI->copy->getOperand(1).getReg();
637 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
638 return VNI->copy->getOperand(2).getReg();
639 unsigned SrcReg, DstReg;
640 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
642 assert(0 && "Unrecognized copy instruction!");
646 //===----------------------------------------------------------------------===//
647 // Register allocator hooks.
650 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
651 /// allow one) virtual register operand, then its uses are implicitly using
652 /// the register. Returns the virtual register.
653 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
654 MachineInstr *MI) const {
// Scan register-use operands, skipping li.reg itself and physregs; assert
// there is at most one other virtual register operand.
656 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
657 MachineOperand &MO = MI->getOperand(i);
658 if (!MO.isRegister() || !MO.isUse())
660 unsigned Reg = MO.getReg();
661 if (Reg == 0 || Reg == li.reg)
663 // FIXME: For now, only remat MI with at most one register operand.
665 "Can't rematerialize instruction with multiple register operand!");
672 /// isValNoAvailableAt - Return true if the val# of the specified interval
673 /// which reaches the given instruction also reaches the specified use index.
674 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
675 unsigned UseIdx) const {
// Compare the value number live at MI's index against the one live at
// UseIdx; both must exist and match.
676 unsigned Index = getInstructionIndex(MI);
677 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
678 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
679 return UI != li.end() && UI->valno == ValNo;
682 /// isReMaterializable - Returns true if the definition MI of the specified
683 /// val# of the specified interval is re-materializable.
684 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
685 const VNInfo *ValNo, MachineInstr *MI,
// Cases handled: IMPLICIT_DEF, loads from immutable stack slots, and
// target-trivially-rematerializable instructions (with an extra check that
// any implicitly-used virtual register is available at every use of li.reg).
691 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
695 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
696 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
697 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
698 // this but remember this is not safe to fold into a two-address
700 // This is a load from fixed stack slot. It can be rematerialized.
703 if (tii_->isTriviallyReMaterializable(MI)) {
704 const TargetInstrDesc &TID = MI->getDesc();
705 isLoad = TID.isSimpleLoad();
707 unsigned ImpUse = getReMatImplicitUse(li, MI);
709 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every use of li.reg reached by this val# must also be reached by the
// implicit-use register's value; otherwise remat would read a stale value.
710 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
711 re = mri_->use_end(); ri != re; ++ri) {
712 MachineInstr *UseMI = &*ri;
713 unsigned UseIdx = getInstructionIndex(UseMI);
714 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
716 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
726 /// isReMaterializable - Returns true if every definition of MI of every
727 /// val# of the specified interval is re-materializable.
728 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Check each value number's defining instruction via the per-val# overload,
// skipping dead val#s; isLoad accumulates whether any def is a load.
730 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
732 const VNInfo *VNI = *i;
733 unsigned DefIdx = VNI->def;
735 continue; // Dead val#.
736 // Is the def for the val# rematerializable?
739 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
740 bool DefIsLoad = false;
742 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
749 /// FilterFoldedOps - Filter out two-address use operands. Return
750 /// true if it finds any issue with the operands that ought to prevent
// folding. On success, FoldOps holds the foldable operand indexes and
// MRInfo accumulates whether the register is read, written, or both.
752 static bool FilterFoldedOps(MachineInstr *MI,
753 SmallVector<unsigned, 2> &Ops,
755 SmallVector<unsigned, 2> &FoldOps) {
756 const TargetInstrDesc &TID = MI->getDesc();
759 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
760 unsigned OpIdx = Ops[i];
761 MachineOperand &MO = MI->getOperand(OpIdx);
762 // FIXME: fold subreg use.
766 MRInfo |= (unsigned)VirtRegMap::isMod;
768 // Filter out two-address use operand(s).
// A tied (TIED_TO) non-implicit use implies the instruction also writes
// the register, so mark it modref and do not add it to FoldOps.
769 if (!MO.isImplicit() &&
770 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
771 MRInfo = VirtRegMap::isModRef;
774 MRInfo |= (unsigned)VirtRegMap::isRef;
776 FoldOps.push_back(OpIdx);
782 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
783 /// slot / to reg or any rematerialized load into ith operand of specified
784 /// MI. If it is successful, MI is updated with the newly created MI and
// returns true. On success the old instruction is replaced in place and all
// index / spill-point maps are transferred to the folded instruction.
786 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
787 VirtRegMap &vrm, MachineInstr *DefMI,
789 SmallVector<unsigned, 2> &Ops,
790 bool isSS, int Slot, unsigned Reg) {
791 // If it is an implicit def instruction, just delete it.
792 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
793 RemoveMachineInstrFromMaps(MI);
794 vrm.RemoveMachineInstrFromMaps(MI);
795 MI->eraseFromParent();
800 // Filter the list of operand indexes that are to be folded. Abort if
801 // any operand will prevent folding.
803 SmallVector<unsigned, 2> FoldOps;
804 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
807 // The only time it's safe to fold into a two address instruction is when
808 // it's folding reload and spill from / into a spill stack slot.
809 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/reload) or the remat def instruction.
812 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
813 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
815 // Remember this instruction uses the spill slot.
816 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
818 // Attempt to fold the memory reference into the instruction. If
819 // we can do this, we don't need to insert spill code.
821 lv_->instructionChanged(MI, fmi);
823 fmi->copyKillDeadInfo(MI, tri_);
824 MachineBasicBlock &MBB = *MI->getParent();
825 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
826 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
827 vrm.transferSpillPts(MI, fmi);
828 vrm.transferRestorePts(MI, fmi);
829 vrm.transferEmergencySpills(MI, fmi);
// Splice the folded instruction into the numbering maps at MI's old index
// and into the block at MI's old position.
831 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
832 mi2iMap_[fmi] = InstrIdx;
833 MI = MBB.insert(MBB.erase(MI), fmi);
840 /// canFoldMemoryOperand - Returns true if the specified load / store
841 /// folding is possible.
842 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
843 SmallVector<unsigned, 2> &Ops,
// (third parameter, ReMat, indicates a rematerialized def is being folded)
845 // Filter the list of operand indexes that are to be folded. Abort if
846 // any operand will prevent folding.
848 SmallVector<unsigned, 2> FoldOps;
849 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
852 // It's only legal to remat for a use, not a def.
853 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final decision to the target hook.
856 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of li lies inside a single
// basic block, found by binary-searching each range's start in Idx2MBBMap
// and checking the range does not extend past that block's start bound.
// (The final size check on MBBs falls in lines not visible here.)
859 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
860 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
861 for (LiveInterval::Ranges::const_iterator
862 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
863 std::vector<IdxMBBPair>::const_iterator II =
864 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
865 if (II == Idx2MBBMap.end())
867 if (I->end > II->first) // crossing a MBB.
869 MBBs.insert(II->second);
876 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
877 /// interval on to-be re-materialized operands of MI) with new register.
878 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
879 MachineInstr *MI, unsigned NewVReg,
881 // There is an implicit use. That means one of the other operand is
882 // being remat'ed and the remat'ed instruction has li.reg as an
883 // use operand. Make sure we rewrite that as well.
884 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
885 MachineOperand &MO = MI->getOperand(i);
886 if (!MO.isRegister())
888 unsigned Reg = MO.getReg();
// Only virtual registers that have been rematerialized are of interest.
889 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
891 if (!vrm.isReMaterialized(Reg))
// Patch the remat'ed instruction's use of li.reg to the replacement vreg.
893 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
894 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
896 UseMO->setReg(NewVReg);
900 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
901 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
903 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
904 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
905 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
906 unsigned Slot, int LdSlot,
907 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
909 const TargetRegisterClass* rc,
910 SmallVector<int, 4> &ReMatIds,
911 const MachineLoopInfo *loopInfo,
912 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
913 std::map<unsigned,unsigned> &MBBVRegsMap,
914 std::vector<LiveInterval*> &NewLIs) {
915 bool CanFold = false;
917 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
918 MachineOperand& mop = MI->getOperand(i);
919 if (!mop.isRegister())
921 unsigned Reg = mop.getReg();
923 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
928 bool TryFold = !DefIsReMat;
929 bool FoldSS = true; // Default behavior unless it's a remat.
932 // If this is the rematerializable definition MI itself and
933 // all of its uses are rematerialized, simply delete it.
934 if (MI == ReMatOrigDefMI && CanDelete) {
935 DOUT << "\t\t\t\tErasing re-materlizable def: ";
937 RemoveMachineInstrFromMaps(MI);
938 vrm.RemoveMachineInstrFromMaps(MI);
939 MI->eraseFromParent();
943 // If def for this use can't be rematerialized, then try folding.
944 // If def is rematerializable and it's a load, also try folding.
945 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
947 // Try fold loads (from stack slot, constant pool, etc.) into uses.
953 // Scan all of the operands of this instruction rewriting operands
954 // to use NewVReg instead of li.reg as appropriate. We do this for
957 // 1. If the instr reads the same spilled vreg multiple times, we
958 // want to reuse the NewVReg.
959 // 2. If the instr is a two-addr instruction, we are required to
960 // keep the src/dst regs pinned.
962 // Keep track of whether we replace a use and/or def so that we can
963 // create the spill interval with the appropriate range.
965 HasUse = mop.isUse();
966 HasDef = mop.isDef();
967 SmallVector<unsigned, 2> Ops;
969 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
970 const MachineOperand &MOj = MI->getOperand(j);
971 if (!MOj.isRegister())
973 unsigned RegJ = MOj.getReg();
974 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
978 HasUse |= MOj.isUse();
979 HasDef |= MOj.isDef();
984 // Do not fold load / store here if we are splitting. We'll find an
985 // optimal point to insert a load / store later.
987 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
988 Ops, FoldSS, FoldSlot, Reg)) {
989 // Folding the load/store can completely change the instruction in
990 // unpredictable ways, rescan it from the beginning.
996 goto RestartInstruction;
999 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1004 // Create a new virtual register for the spill interval.
1005 bool CreatedNewVReg = false;
1007 NewVReg = mri_->createVirtualRegister(rc);
1009 CreatedNewVReg = true;
1011 mop.setReg(NewVReg);
1012 if (mop.isImplicit())
1013 rewriteImplicitOps(li, MI, NewVReg, vrm);
1015 // Reuse NewVReg for other reads.
1016 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1017 MachineOperand &mopj = MI->getOperand(Ops[j]);
1018 mopj.setReg(NewVReg);
1019 if (mopj.isImplicit())
1020 rewriteImplicitOps(li, MI, NewVReg, vrm);
1023 if (CreatedNewVReg) {
1025 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1026 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1027 // Each valnum may have its own remat id.
1028 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1030 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1032 if (!CanDelete || (HasUse && HasDef)) {
1033 // If this is a two-addr instruction then its use operands are
1034 // rematerializable but its def is not. It should be assigned a
1036 vrm.assignVirt2StackSlot(NewVReg, Slot);
1039 vrm.assignVirt2StackSlot(NewVReg, Slot);
1041 } else if (HasUse && HasDef &&
1042 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1043 // If this interval hasn't been assigned a stack slot (because earlier
1044 // def is a deleted remat def), do it now.
1045 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1046 vrm.assignVirt2StackSlot(NewVReg, Slot);
1049 // Re-matting an instruction with virtual register use. Add the
1050 // register as an implicit use on the use MI.
1051 if (DefIsReMat && ImpUse)
1052 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1054 // create a new register interval for this spill / remat.
1055 LiveInterval &nI = getOrCreateInterval(NewVReg);
1056 if (CreatedNewVReg) {
1057 NewLIs.push_back(&nI);
1058 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1060 vrm.setIsSplitFromReg(NewVReg, li.reg);
1064 if (CreatedNewVReg) {
1065 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1066 nI.getNextValue(~0U, 0, VNInfoAllocator));
1070 // Extend the split live interval to this def / use.
1071 unsigned End = getUseIndex(index)+1;
1072 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1073 nI.getValNumInfo(nI.getNumValNums()-1));
1079 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1080 nI.getNextValue(~0U, 0, VNInfoAllocator));
1085 DOUT << "\t\t\t\tAdded new interval: ";
1086 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the val# has a kill located inside
/// MBB strictly after index Idx (i.e. KillIdx in (Idx, end-of-MBB)).
/// NOTE(review): the `const VNInfo *VNI` parameter appears elided from this
/// view of the signature (the body reads VNI->kills) — confirm against the
/// header declaration; callers below pass I->valno / VNI here.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  // Upper bound: only kills that occur before the end of this MBB count.
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    // A kill inside (Idx, End) means the value is still used later in MBB.
    if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Linear scan over the interval's value numbers for the
/// one whose def index equals DefIdx. VNI starts NULL, so if no val# matches
/// the function presumably returns NULL (tail elided from this view).
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    // Match on the exact def slot of the value number.
    if ((*i)->def == DefIdx) {
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
/// Records one def/use occurrence of the spilled register: instruction
/// index, the instruction itself, and whether the operand reads (HasUse)
/// and/or writes (HasDef) the register. Field declarations are elided from
/// this view; the initializer list below names them.
struct RewriteInfo {
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
/// RewriteInfoCompare - Strict weak ordering by ascending instruction index,
/// used with std::sort so rewrites are processed in program order.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all def/use of the spilled register
/// li.reg inside the live range [I->start, I->end): collect the affected
/// instructions sorted by index, rewrite each via
/// rewriteInstructionForSpills, and record per-MBB spill points (SpillIdxes/
/// SpillMBBs) and restore points (RestoreIdxes/RestoreMBBs) for the spiller.
/// NOTE(review): a `VirtRegMap &vrm` parameter is used below but elided from
/// this view of the parameter list — confirm against the header.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             std::map<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  // Set false as soon as one rewritten instruction cannot be folded; used at
  // the bottom to give fully-foldable split intervals a low spill weight.
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    // Skip operands whose instruction lies outside this live range.
    if (index < start || index >= end)
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  // If the def is rematerializable and reads a virtual register, remember it
  // so the remat copies can carry it as an implicit use.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    // Coalesce all entries belonging to the same MachineInstr.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    // When splitting per-BB, reuse the replacement vreg already chosen for
    // this MBB (if any).
    std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
      // It's better to start a new interval to avoid artifically
      // extend the new interval.
      if (MIHasDef && !MIHasUse) {
        MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its def / use
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    // Rewrite this instruction; HasDef/HasUse report what was actually
    // replaced, CanFold whether a load/store could be folded into MI.
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing rewritten (e.g. the instruction was deleted) — nothing to track.
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    // The spill weight is now infinity as it cannot be spilled again.
    nI.weight = HUGE_VALF;
    // Keep track of the last def and first use in each MBB.
    if (MI != ReMatOrigDefMI || !CanDelete) {
      bool HasKill = false;
        HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
      // If this is a two-address code, then this index starts a new VNInfo.
      const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
        HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      // Record a potential spill point for this def in MBBId.
      if (SII == SpillIdxes.end()) {
        std::vector<SRInfo> S;
        S.push_back(SRInfo(index, NewVReg, true));
        SpillIdxes.insert(std::make_pair(MBBId, S));
      } else if (SII->second.back().vreg != NewVReg) {
        SII->second.push_back(SRInfo(index, NewVReg, true));
      } else if ((int)index > SII->second.back().index) {
        // If there is an earlier def and this is a two-address
        // instruction, then it's not possible to fold the store (which
        // would also fold the load).
        SRInfo &Info = SII->second.back();
        Info.canFold = !HasUse;
      SpillMBBs.set(MBBId);
    } else if (SII != SpillIdxes.end() &&
               SII->second.back().vreg == NewVReg &&
               (int)index > SII->second.back().index) {
      // There is an earlier def that's not killed (must be two-address).
      // The spill is no longer needed.
      SII->second.pop_back();
      if (SII->second.empty()) {
        SpillIdxes.erase(MBBId);
        SpillMBBs.reset(MBBId);
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  // Last interval of the range: if everything folded, lower its weight too.
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if a restore of virtual register vr is
/// recorded (and marked foldable) at the given instruction index in basic
/// block Id. Returns false when the block has no pending restores (guard
/// body elided from this view — presumably `return false;`).
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // Fast path: no restores recorded for this MBB at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    // Match index, register, and foldability.
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
1350 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1351 BitVector &RestoreMBBs,
1352 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1353 if (!RestoreMBBs[Id])
1355 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1356 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1357 if (Restores[i].index == index && Restores[i].vreg)
1358 Restores[i].index = -1;
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
/// Walks every operand of li.reg: IMPLICIT_DEF defs are erased from the
/// function and from the index/virt-reg maps; each remaining operand (a use
/// of the implicit def) is redirected to a brand-new vreg with an empty
/// interval. NOTE(review): return type and some loop-control lines are
/// elided from this view — confirm against the header.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Any def of the spilled register left here must be the IMPLICIT_DEF
    // itself; real defs were rewritten earlier.
    assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
           "Register def was not rewritten?");
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be an use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    // Redirect every operand of MI that still names li.reg to the new vreg.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg)
/// addIntervalsForSpills - Spill the interval li, returning the new live
/// intervals created for its replacement registers. Overall flow visible
/// here: (1) special-case an interval that was itself produced by an earlier
/// split; (2) analyze each val# for rematerializability and decide whether a
/// stack slot is needed; (3) rewrite all defs/uses range by range; (4) emit
/// spill and restore points recorded in SpillIdxes/RestoreIdxes, folding
/// memory operands where possible; (5) finalize kills and spill weights.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");
  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  // Each bit specify whether it a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  // Per-val# rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    // Reuse the remat info / stack slot already assigned to the split reg.
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
        // Non-first ranges: plain spill, no remat.
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
  // Splitting only pays off when the interval spans more than one MBB, and
  // is capped by -split-limit when set.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  // Insert spills / restores if we are splitting.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Emit a spill (or fold a store) for every recorded spill point.
    int Id = SpillMBBs.find_first();
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          // Collect the operand indices of VReg in MI for folding.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                              RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            // Also folded uses, do not issue a load.
            eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
            nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
        // Otherwise tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);
  // Emit a restore (or fold a load / remat) for every recorded restore point.
  int Id = RestoreMBBs.find_first();
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      if (restores[i].canFold) {
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
          // If this restore were to be folded, it would have been folded
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
        vrm.addRestorePoint(VReg, MI);
    Id = RestoreMBBs.find_next(Id);
  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      // Normalize weight by interval size so short intervals aren't favored.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // No spill recorded for this interval: mark its last use as a kill.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);
  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
/// A super-register qualifies only if it is both allocatable and has a live
/// interval here (return statements elided from this view).
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
/// Starts from Reg itself and walks super-registers, choosing one that has
/// an interval but no allocatable super-register of its own (i.e. the top of
/// the chain); the loop tail and final return are elided from this view.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
/// Counts operands of li.reg whose instruction index falls inside the
/// representative physreg's live interval (the increment line is elided
/// from this view — presumably `++NumConflicts;` under the liveAt test).
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the largest super-register's interval.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
  return NumConflicts;
1747 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1748 /// around all defs and uses of the specified interval.
1749 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1750 unsigned PhysReg, VirtRegMap &vrm) {
1751 unsigned SpillReg = getRepresentativeReg(PhysReg);
1753 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1754 // If there are registers which alias PhysReg, but which are not a
1755 // sub-register of the chosen representative super register. Assert
1756 // since we can't handle it yet.
1757 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1758 tri_->isSuperRegister(*AS, SpillReg));
1760 LiveInterval &pli = getInterval(SpillReg);
1761 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1762 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1763 E = mri_->reg_end(); I != E; ++I) {
1764 MachineOperand &O = I.getOperand();
1765 MachineInstr *MI = O.getParent();
1766 if (SeenMIs.count(MI))
1769 unsigned Index = getInstructionIndex(MI);
1770 if (pli.liveAt(Index)) {
1771 vrm.addEmergencySpill(SpillReg, MI);
1772 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1773 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1774 if (!hasInterval(*AS))
1776 LiveInterval &spli = getInterval(*AS);
1777 if (spli.liveAt(Index))
1778 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);