1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
39 // Hidden options for help debugging.
// When set, disables rematerialization of defs during spilling (debug knob).
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// Enable splitting of live intervals at basic-block boundaries (default on).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
// Limit on interval splits; default -1 (presumably "no limit" -- confirm
// against the code that reads it, which is not visible in this chunk).
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// NOTE(review): purpose not evident from this chunk; looks related to giving
// empty MBBs their own index slot in computeNumbering -- confirm at uses.
47 static cl::opt<bool> EmptyBBIndex("empty-bb-index",
48 cl::init(false), cl::Hidden);
// Pass statistics, reported via -stats.
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass ID and static registration under the name "liveintervals".
55 char LiveIntervals::ID = 0;
56 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare this pass's analysis dependencies: LiveVariables is both required
// and preserved; loop info, dominators, and PHI elimination survive the pass;
// PHI elimination and two-address lowering must run before it.
58 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
59 AU.addPreserved<LiveVariables>();
60 AU.addRequired<LiveVariables>();
61 AU.addPreservedID(MachineLoopInfoID);
62 AU.addPreservedID(MachineDominatorsID);
63 AU.addPreservedID(PHIEliminationID);
64 AU.addRequiredID(PHIEliminationID);
65 AU.addRequiredID(TwoAddressInstructionPassID);
// Delegate to the base class to pick up generic MachineFunctionPass usage.
66 MachineFunctionPass::getAnalysisUsage(AU);
// Free per-function state between runs of the pass.
// NOTE(review): several original lines of this body are elided in this chunk
// (the dtor'ing of VNInfo objects referenced below, and the loop body that
// presumably deletes each entry of ClonedMIs).
69 void LiveIntervals::releaseMemory() {
75 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
76 VNInfoAllocator.Reset();
77 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
// Assign a dense slot index (InstrSlots::NUM slots per instruction) to every
// MachineInstr and record per-MBB index ranges; then, if a previous numbering
// existed (OldI2MI non-empty), remap every live range start/end, VNInfo def,
// and VNInfo kill index from the old numbering to the new one.
// NOTE(review): many original lines are elided in this chunk (loop headers'
// continuation lines, several else-branches and closing braces), so the
// comments below describe only what the visible lines establish.
81 void LiveIntervals::computeNumbering() {
// Keep the previous index->instruction map so old indices can be remapped.
82 Index2MiMap OldI2MI = i2miMap_;
89 // Number MachineInstrs and MachineBasicBlocks.
90 // Initialize MBB indexes to a sentinel.
91 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
94 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
96 unsigned StartIdx = MIIndex;
98 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
// Each instruction gets a unique index; duplicates indicate a bug.
100 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
101 assert(inserted && "multiple MachineInstr -> index mappings");
102 i2miMap_.push_back(I);
103 MIIndex += InstrSlots::NUM;
106 // Set the MBB2IdxMap entry for this MBB.
108 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
109 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
110 : std::make_pair(StartIdx, MIIndex - 1);
111 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// An empty MBB still consumes one slot group; 0 marks "no instruction".
113 if (StartIdx == MIIndex) {
115 MIIndex += InstrSlots::NUM;
116 i2miMap_.push_back(0);
119 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
120 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted so lower_bound lookups by index work.
123 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
125 if (!OldI2MI.empty())
126 for (iterator I = begin(), E = end(); I != E; ++I)
127 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
130 // Remap the start index of the live range to the corresponding new
131 // number, or our best guess at what it _should_ correspond to if the
132 // original instruction has been erased. This is either the following
133 // instruction or its predecessor.
134 unsigned offset = LI->start % InstrSlots::NUM;
135 if (OldI2MI[LI->start / InstrSlots::NUM])
136 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
// Erased instruction: scan forward for the next surviving instruction
// (the scan loop's header is elided in this chunk).
139 MachineInstr* newInstr = 0;
141 newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
145 if (mi2iMap_[newInstr] ==
146 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
147 LI->start = mi2iMap_[newInstr];
149 LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
152 // Remap the ending index in the same way that we remapped the start,
153 // except for the final step where we always map to the immediately
154 // following instruction.
155 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
156 offset = LI->end % InstrSlots::NUM;
157 if (OldI2MI[LI->end / InstrSlots::NUM])
158 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
161 MachineInstr* newInstr = 0;
163 newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
167 LI->end = mi2iMap_[newInstr];
// End index was past the old numbering: clamp to the new end of function.
170 LI->end = i2miMap_.size() * InstrSlots::NUM;
173 // Remap the VNInfo def index, which works the same as the
174 // start indices above.
175 VNInfo* vni = LI->valno;
176 offset = vni->def % InstrSlots::NUM;
177 if (OldI2MI[vni->def / InstrSlots::NUM])
178 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
181 MachineInstr* newInstr = 0;
183 newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
187 if (mi2iMap_[newInstr] ==
188 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
189 vni->def = mi2iMap_[newInstr];
191 vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
194 // Remap the VNInfo kill indices, which works the same as
195 // the end indices above.
196 for (size_t i = 0; i < vni->kills.size(); ++i) {
197 offset = vni->kills[i] % InstrSlots::NUM;
198 if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
199 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
203 MachineInstr* newInstr = 0;
// NOTE(review): this scan uses "+ e" where the start/def scans use "+ i";
// the loop header is elided here, so it cannot be confirmed from this chunk
// whether that is intentional -- worth checking upstream.
205 newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
209 vni->kills[i] = mi2iMap_[newInstr];
215 /// runOnMachineFunction - Register allocate the whole function
// Entry point: cache target/register/instruction info and LiveVariables,
// then compute intervals (the calls to computeNumbering/computeIntervals and
// the coalescing step are among the lines elided in this chunk).
217 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
219 mri_ = &mf_->getRegInfo();
220 tm_ = &fn.getTarget();
221 tri_ = tm_->getRegisterInfo();
222 tii_ = tm_->getInstrInfo();
223 lv_ = &getAnalysis<LiveVariables>();
224 allocatableRegs_ = tri_->getAllocatableSet(fn);
229 numIntervals += getNumIntervals();
// Debug dump of all computed intervals.
231 DOUT << "********** INTERVALS **********\n";
232 for (iterator I = begin(), E = end(); I != E; ++I) {
233 I->second.print(DOUT, tri_);
237 numIntervalsAfter += getNumIntervals();
242 /// print - Implement the dump method.
// Prints every live interval, then every machine instruction prefixed with
// its slot index, grouped by basic block.
243 void LiveIntervals::print(std::ostream &O, const Module* ) const {
244 O << "********** INTERVALS **********\n";
245 for (const_iterator I = begin(), E = end(); I != E; ++I) {
246 I->second.print(O, tri_);
250 O << "********** MACHINEINSTRS **********\n";
251 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
252 mbbi != mbbe; ++mbbi) {
253 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
254 for (MachineBasicBlock::iterator mii = mbbi->begin(),
255 mie = mbbi->end(); mii != mie; ++mii) {
256 O << getInstructionIndex(mii) << '\t' << *mii;
261 /// conflictsWithPhysRegDef - Returns true if the specified register
262 /// is defined during the duration of the specified interval.
// Walks every instruction covered by li's ranges, skipping erased slots and
// plain copies involving li.reg, and checks each register operand (mapped
// through the VirtRegMap for virtuals) for overlap with 'reg'.
// NOTE(review): the "return"/"continue" statements of several conditionals
// are elided in this chunk.
263 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
264 VirtRegMap &vrm, unsigned reg) {
265 for (LiveInterval::Ranges::const_iterator
266 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
267 for (unsigned index = getBaseIndex(I->start),
268 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
269 index += InstrSlots::NUM) {
270 // skip deleted instructions
271 while (index != end && !getInstructionFromIndex(index))
272 index += InstrSlots::NUM;
273 if (index == end) break;
275 MachineInstr *MI = getInstructionFromIndex(index);
// A copy to/from li.reg itself is not treated as a conflict.
276 unsigned SrcReg, DstReg;
277 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
278 if (SrcReg == li.reg || DstReg == li.reg)
280 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
281 MachineOperand& mop = MI->getOperand(i);
282 if (!mop.isRegister())
284 unsigned PhysReg = mop.getReg();
285 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual operands to their assigned physreg, if any.
287 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
288 if (!vrm.hasPhys(PhysReg))
290 PhysReg = vrm.getPhys(PhysReg);
292 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// Debug helper: print a physreg by its target name, a vreg as "%reg<N>".
// (The else keyword between the two prints is among the elided lines.)
301 void LiveIntervals::printRegName(unsigned reg) const {
302 if (TargetRegisterInfo::isPhysicalRegister(reg))
303 cerr << tri_->getName(reg);
305 cerr << "%reg" << reg;
// Extend 'interval' for a definition of a virtual register by instruction
// 'mi'. Three cases are handled: the first def of the vreg (empty interval),
// a redef from two-address lowering, and a redef from PHI elimination.
// NOTE(review): numerous original lines (MIIdx parameter line, else/return
// statements, closing braces) are elided in this chunk.
308 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
309 MachineBasicBlock::iterator mi,
311 LiveInterval &interval) {
312 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
313 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
// Implicit defs get special treatment (the handling itself is elided here).
315 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
316 DOUT << "is a implicit_def\n";
320 // Virtual registers may be defined multiple times (due to phi
321 // elimination and 2-addr elimination). Much of what we do only has to be
322 // done once for the vreg. We use an empty interval to detect the first
323 // time we see a vreg.
324 if (interval.empty()) {
325 // Get the Idx of the defining instructions.
326 unsigned defIndex = getDefIndex(MIIdx);
// Remember the copy instruction for later coalescing if this def is a
// copy-like instruction (subreg extract/insert or a plain move).
328 MachineInstr *CopyMI = NULL;
329 unsigned SrcReg, DstReg;
330 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
331 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
332 tii_->isMoveInstr(*mi, SrcReg, DstReg))
334 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
336 assert(ValNo->id == 0 && "First value in interval is not 0?");
338 // Loop over all of the blocks that the vreg is defined in. There are
339 // two cases we have to handle here. The most common case is a vreg
340 // whose lifetime is contained within a basic block. In this case there
341 // will be a single kill, in MBB, which comes after the definition.
342 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
343 // FIXME: what about dead vars?
345 if (vi.Kills[0] != mi)
346 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
348 killIdx = defIndex+1;
350 // If the kill happens after the definition, we have an intra-block
352 if (killIdx > defIndex) {
353 assert(vi.AliveBlocks.none() &&
354 "Shouldn't be alive across any blocks!");
355 LiveRange LR(defIndex, killIdx, ValNo);
356 interval.addRange(LR);
357 DOUT << " +" << LR << "\n";
358 interval.addKill(ValNo, killIdx);
363 // The other case we handle is when a virtual register lives to the end
364 // of the defining block, potentially live across some blocks, then is
365 // live into some number of blocks, but gets killed. Start by adding a
366 // range that goes from this definition to the end of the defining block.
367 LiveRange NewLR(defIndex,
368 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
370 DOUT << " +" << NewLR;
371 interval.addRange(NewLR);
373 // Iterate over all of the blocks that the variable is completely
374 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
376 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
377 if (vi.AliveBlocks[i]) {
378 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
380 LiveRange LR(getMBBStartIdx(i),
381 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
383 interval.addRange(LR);
389 // Finally, this virtual register is live from the start of any killing
390 // block to the 'use' slot of the killing instruction.
391 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
392 MachineInstr *Kill = vi.Kills[i];
393 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
394 LiveRange LR(getMBBStartIdx(Kill->getParent()),
396 interval.addRange(LR);
397 interval.addKill(ValNo, killIdx);
402 // If this is the second time we see a virtual register definition, it
403 // must be due to phi elimination or two addr elimination. If this is
404 // the result of two address elimination, then the vreg is one of the
405 // def-and-use register operand.
406 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
407 // If this is a two-address definition, then we have already processed
408 // the live range. The only problem is that we didn't realize there
409 // are actually two values in the live interval. Because of this we
410 // need to take the LiveRegion that defines this register and split it
412 assert(interval.containsOneValue());
413 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
414 unsigned RedefIndex = getDefIndex(MIIdx);
416 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
417 VNInfo *OldValNo = OldLR->valno;
419 // Delete the initial value, which should be short and continuous,
420 // because the 2-addr copy must be in the same MBB as the redef.
421 interval.removeRange(DefIndex, RedefIndex);
423 // Two-address vregs should always only be redefined once. This means
424 // that at this point, there should be exactly one value number in it.
425 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
427 // The new value number (#1) is defined by the instruction we claimed
429 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
432 // Value#0 is now defined by the 2-addr instruction.
433 OldValNo->def = RedefIndex;
436 // Add the new live interval which replaces the range for the input copy.
437 LiveRange LR(DefIndex, RedefIndex, ValNo);
438 DOUT << " replace range with " << LR;
439 interval.addRange(LR);
440 interval.addKill(ValNo, RedefIndex);
442 // If this redefinition is dead, we need to add a dummy unit live
443 // range covering the def slot.
444 if (mi->registerDefIsDead(interval.reg, tri_))
445 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
448 interval.print(DOUT, tri_);
451 // Otherwise, this must be because of phi elimination. If this is the
452 // first redefinition of the vreg that we have seen, go back and change
453 // the live range in the PHI block to be a different value number.
454 if (interval.containsOneValue()) {
455 assert(vi.Kills.size() == 1 &&
456 "PHI elimination vreg should have one kill, the PHI itself!");
458 // Remove the old range that we now know has an incorrect number.
459 VNInfo *VNI = interval.getValNumInfo(0);
460 MachineInstr *Killer = vi.Kills[0];
461 unsigned Start = getMBBStartIdx(Killer->getParent());
462 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
463 DOUT << " Removing [" << Start << "," << End << "] from: ";
464 interval.print(DOUT, tri_); DOUT << "\n";
465 interval.removeRange(Start, End);
466 VNI->hasPHIKill = true;
467 DOUT << " RESULT: "; interval.print(DOUT, tri_);
469 // Replace the interval with one of a NEW value number. Note that this
470 // value number isn't actually defined by an instruction, weird huh? :)
471 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
472 DOUT << " replace range with " << LR;
473 interval.addRange(LR);
474 interval.addKill(LR.valno, End);
475 DOUT << " RESULT: "; interval.print(DOUT, tri_);
478 // In the case of PHI elimination, each variable definition is only
479 // live until the end of the block. We've already taken care of the
480 // rest of the live range.
481 unsigned defIndex = getDefIndex(MIIdx);
// As above: record the copy instruction for copy-like defs.
484 MachineInstr *CopyMI = NULL;
485 unsigned SrcReg, DstReg;
486 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
487 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
488 tii_->isMoveInstr(*mi, SrcReg, DstReg))
490 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
492 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
493 LiveRange LR(defIndex, killIndex, ValNo);
494 interval.addRange(LR);
495 interval.addKill(ValNo, killIndex);
496 ValNo->hasPHIKill = true;
// Extend 'interval' for a def of a physical register at MIIdx. Physregs do
// not live across blocks here, so the range always ends within MBB: at the
// def slot + 1 if dead, at the killing use, or at a clobbering redef.
// NOTE(review): the MIIdx parameter line and some else/exit lines are elided
// in this chunk.
504 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
505 MachineBasicBlock::iterator mi,
507 LiveInterval &interval,
508 MachineInstr *CopyMI) {
509 // A physical register cannot be live across basic block, so its
510 // lifetime must end somewhere in its defining basic block.
511 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
513 unsigned baseIndex = MIIdx;
514 unsigned start = getDefIndex(baseIndex);
515 unsigned end = start;
517 // If it is not used after definition, it is considered dead at
518 // the instruction defining it. Hence its interval is:
519 // [defSlot(def), defSlot(def)+1)
520 if (mi->registerDefIsDead(interval.reg, tri_)) {
522 end = getDefIndex(start) + 1;
526 // If it is not dead on definition, it must be killed by a
527 // subsequent instruction. Hence its interval is:
528 // [defSlot(def), useSlot(kill)+1)
529 while (++mi != MBB->end()) {
530 baseIndex += InstrSlots::NUM;
531 if (mi->killsRegister(interval.reg, tri_)) {
533 end = getUseIndex(baseIndex) + 1;
535 } else if (mi->modifiesRegister(interval.reg, tri_)) {
536 // Another instruction redefines the register before it is ever read.
537 // Then the register is essentially dead at the instruction that defines
538 // it. Hence its interval is:
539 // [defSlot(def), defSlot(def)+1)
541 end = getDefIndex(start) + 1;
546 // The only case we should have a dead physreg here without a killing or
547 // instruction where we know it's dead is if it is live-in to the function
549 assert(!CopyMI && "physreg was not killed in defining block!");
550 end = getDefIndex(start) + 1; // It's dead.
553 assert(start < end && "did not find end of interval?");
555 // Already exists? Extend old live interval.
556 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
557 VNInfo *ValNo = (OldLR != interval.end())
558 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
559 LiveRange LR(start, end, ValNo);
560 interval.addRange(LR);
561 interval.addKill(LR.valno, end);
562 DOUT << " +" << LR << '\n';
// Dispatch a register def to the virtual- or physical-register handler.
// For allocatable physregs, also creates intervals for each sub-register
// that is not explicitly redefined by the same instruction.
// NOTE(review): the MIIdx/reg parameter lines are elided in this chunk.
565 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
566 MachineBasicBlock::iterator MI,
569 if (TargetRegisterInfo::isVirtualRegister(reg))
570 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
571 else if (allocatableRegs_[reg]) {
// Track the copy instruction for copy-like defs, as in the vreg case.
572 MachineInstr *CopyMI = NULL;
573 unsigned SrcReg, DstReg;
574 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
575 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
576 tii_->isMoveInstr(*MI, SrcReg, DstReg))
578 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
579 // Def of a register also defines its sub-registers.
580 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
581 // If MI also modifies the sub-register explicitly, avoid processing it
582 // more than once. Do not pass in TRI here so it checks for exact match.
583 if (!MI->modifiesRegister(*AS))
584 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// Create a live range for a register that is live-in to MBB: scan forward
// for a kill or a clobbering redef; the fallthrough case (register never
// used) and the MIIdx parameter line are partially elided in this chunk.
588 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
590 LiveInterval &interval, bool isAlias) {
591 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
593 // Look for kills, if it reaches a def before it's killed, then it shouldn't
594 // be considered a livein.
595 MachineBasicBlock::iterator mi = MBB->begin();
596 unsigned baseIndex = MIIdx;
597 unsigned start = baseIndex;
598 unsigned end = start;
599 while (mi != MBB->end()) {
600 if (mi->killsRegister(interval.reg, tri_)) {
602 end = getUseIndex(baseIndex) + 1;
604 } else if (mi->modifiesRegister(interval.reg, tri_)) {
605 // Another instruction redefines the register before it is ever read.
606 // Then the register is essentially dead at the instruction that defines
607 // it. Hence its interval is:
608 // [defSlot(def), defSlot(def)+1)
610 end = getDefIndex(start) + 1;
614 baseIndex += InstrSlots::NUM;
619 // Live-in register might not be used at all.
623 end = getDefIndex(MIIdx) + 1;
625 DOUT << " live through";
// Record the resulting range with a fresh value number (no defining instr).
630 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
631 interval.addRange(LR);
632 interval.addKill(LR.valno, end);
633 DOUT << " +" << LR << '\n';
636 /// computeIntervals - computes the live intervals for virtual
637 /// registers. for some ordering of the machine instructions [1,N] a
638 /// live interval is an interval [i, j) where 1 <= i <= j < N for
639 /// which a variable is live
// Walks every MBB: first handles live-in registers (and their unseen
// sub/alias registers), then every register def operand of every
// instruction, advancing MIIndex by InstrSlots::NUM per instruction.
640 void LiveIntervals::computeIntervals() {
641 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
642 << "********** Function: "
643 << ((Value*)mf_->getFunction())->getName() << '\n';
644 // Track the index of the current machine instr.
645 unsigned MIIndex = 0;
646 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
648 MachineBasicBlock *MBB = MBBI;
649 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
651 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
653 // Create intervals for live-ins to this BB first.
654 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
655 LE = MBB->livein_end(); LI != LE; ++LI) {
656 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
657 // Multiple live-ins can alias the same register.
658 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
659 if (!hasInterval(*AS))
660 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
664 for (; MI != miEnd; ++MI) {
665 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit operands are seen too.
668 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
669 MachineOperand &MO = MI->getOperand(i);
670 // handle register defs - build intervals
671 if (MO.isRegister() && MO.getReg() && MO.isDef())
672 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
675 MIIndex += InstrSlots::NUM;
// Empty blocks still consume one slot group, matching computeNumbering.
679 if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
// Collect into MBBs every basic block whose start index falls inside LR,
// using binary search on the sorted Idx2MBBMap. Returns whether any were
// found (the return statements are among the elided lines in this chunk).
684 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
685 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
686 std::vector<IdxMBBPair>::const_iterator I =
687 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
690 while (I != Idx2MBBMap.end()) {
691 if (LR.end <= I->first)
693 MBBs.push_back(I->second);
// Construct a fresh LiveInterval for 'reg'. Physical registers get a
// different initial spill weight than virtuals (the physreg weight value is
// on an elided line in this chunk).
701 LiveInterval LiveIntervals::createInterval(unsigned reg) {
702 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
704 return LiveInterval(reg, Weight);
707 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
708 /// copy field and returns the source register that defines it.
// Handles the three copy-like forms recorded in VNInfo::copy:
// EXTRACT_SUBREG (source is operand 1), INSERT_SUBREG (inserted value is
// operand 2), and a plain move. Asserts on anything else.
709 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
713 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
714 return VNI->copy->getOperand(1).getReg();
715 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
716 return VNI->copy->getOperand(2).getReg();
717 unsigned SrcReg, DstReg;
718 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
720 assert(0 && "Unrecognized copy instruction!");
724 //===----------------------------------------------------------------------===//
725 // Register allocator hooks.
728 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
729 /// allow one) virtual register operand, then its uses are implicitly using
730 /// the register. Returns the virtual register.
// Scans MI's use operands for a register other than li.reg; asserts if more
// than one such operand exists (the tracking variable and return statements
// are among the elided lines in this chunk).
731 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
732 MachineInstr *MI) const {
734 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
735 MachineOperand &MO = MI->getOperand(i);
736 if (!MO.isRegister() || !MO.isUse())
738 unsigned Reg = MO.getReg();
739 if (Reg == 0 || Reg == li.reg)
741 // FIXME: For now, only remat MI with at most one register operand.
743 "Can't rematerialize instruction with multiple register operand!");
750 /// isValNoAvailableAt - Return true if the val# of the specified interval
751 /// which reaches the given instruction also reaches the specified use index.
// Looks up the value number live at MI's index and checks that the same
// value number is live at UseIdx.
752 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
753 unsigned UseIdx) const {
754 unsigned Index = getInstructionIndex(MI);
755 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
756 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
757 return UI != li.end() && UI->valno == ValNo;
760 /// isReMaterializable - Returns true if the definition MI of the specified
761 /// val# of the specified interval is re-materializable.
// Cases visible here: IMPLICIT_DEF is trivially rematable; a load from an
// immutable (fixed) stack slot is rematable; otherwise defer to the target's
// isTriviallyReMaterializable, additionally verifying that any implicit-use
// register value reaches every use of li. Several parameter lines, returns,
// and the isLoad out-parameter handling are elided in this chunk.
762 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
763 const VNInfo *ValNo, MachineInstr *MI,
769 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
773 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
774 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
775 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
776 // this but remember this is not safe to fold into a two-address
778 // This is a load from fixed stack slot. It can be rematerialized.
781 if (tii_->isTriviallyReMaterializable(MI)) {
782 const TargetInstrDesc &TID = MI->getDesc();
783 isLoad = TID.isSimpleLoad();
785 unsigned ImpUse = getReMatImplicitUse(li, MI);
787 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every use of li reached by this val# must also see ImpLi's value.
788 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
789 re = mri_->use_end(); ri != re; ++ri) {
790 MachineInstr *UseMI = &*ri;
791 unsigned UseIdx = getInstructionIndex(UseMI);
792 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
794 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
804 /// isReMaterializable - Returns true if every definition of MI of every
805 /// val# of the specified interval is re-materializable.
// Iterates all value numbers; dead val#s are skipped, every live one must
// pass the per-val# isReMaterializable check (return statements and the
// isLoad accumulation are among the elided lines in this chunk).
806 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
808 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
810 const VNInfo *VNI = *i;
811 unsigned DefIdx = VNI->def;
813 continue; // Dead val#.
814 // Is the def for the val# rematerializable?
817 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
818 bool DefIsLoad = false;
820 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
827 /// FilterFoldedOps - Filter out two-address use operands. Return
828 /// true if it finds any issue with the operands that ought to prevent
// Builds FoldOps from Ops, accumulating mod/ref flags into MRInfo; tied
// (two-address) non-implicit operands are treated as isModRef and excluded
// from folding. The MRInfo parameter line and some branch bodies are elided
// in this chunk.
830 static bool FilterFoldedOps(MachineInstr *MI,
831 SmallVector<unsigned, 2> &Ops,
833 SmallVector<unsigned, 2> &FoldOps) {
834 const TargetInstrDesc &TID = MI->getDesc();
837 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
838 unsigned OpIdx = Ops[i];
839 MachineOperand &MO = MI->getOperand(OpIdx);
840 // FIXME: fold subreg use.
844 MRInfo |= (unsigned)VirtRegMap::isMod;
846 // Filter out two-address use operand(s).
847 if (!MO.isImplicit() &&
848 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
849 MRInfo = VirtRegMap::isModRef;
852 MRInfo |= (unsigned)VirtRegMap::isRef;
854 FoldOps.push_back(OpIdx);
860 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
861 /// slot / to reg or any rematerialized load into ith operand of specified
862 /// MI. If it is successul, MI is updated with the newly created MI and
// On success the index maps are updated to point at the folded instruction
// and MI is replaced in its block. Several parameter lines, the
// success/failure returns, and parts of the bookkeeping are elided in this
// chunk.
864 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
865 VirtRegMap &vrm, MachineInstr *DefMI,
867 SmallVector<unsigned, 2> &Ops,
868 bool isSS, int Slot, unsigned Reg) {
869 // If it is an implicit def instruction, just delete it.
870 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
871 RemoveMachineInstrFromMaps(MI);
872 vrm.RemoveMachineInstrFromMaps(MI);
873 MI->eraseFromParent();
878 // Filter the list of operand indexes that are to be folded. Abort if
879 // any operand will prevent folding.
881 SmallVector<unsigned, 2> FoldOps;
882 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
885 // The only time it's safe to fold into a two address instruction is when
886 // it's folding reload and spill from / into a spill stack slot.
887 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat def.
890 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
891 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
893 // Remember this instruction uses the spill slot.
894 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
896 // Attempt to fold the memory reference into the instruction. If
897 // we can do this, we don't need to insert spill code.
899 lv_->instructionChanged(MI, fmi);
901 fmi->copyKillDeadInfo(MI, tri_);
902 MachineBasicBlock &MBB = *MI->getParent();
903 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
904 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer pending spill/restore/emergency-spill points to the new instr.
905 vrm.transferSpillPts(MI, fmi);
906 vrm.transferRestorePts(MI, fmi);
907 vrm.transferEmergencySpills(MI, fmi);
// Rewire the index maps so the folded instruction owns MI's slot index.
909 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
910 mi2iMap_[fmi] = InstrIdx;
911 MI = MBB.insert(MBB.erase(MI), fmi);
918 /// canFoldMemoryOperand - Returns true if the specified load / store
919 /// folding is possible.
// Mirror of tryFoldMemoryOperand's legality checks without mutating MI:
// filter operands, reject remat of a modified operand, then ask the target.
// (Parameter lines for MRInfo/ReMat and an early return are elided here.)
920 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
921 SmallVector<unsigned, 2> &Ops,
923 // Filter the list of operand indexes that are to be folded. Abort if
924 // any operand will prevent folding.
926 SmallVector<unsigned, 2> FoldOps;
927 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
930 // It's only legal to remat for a use, not a def.
931 if (ReMat && (MRInfo & VirtRegMap::isMod))
934 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Returns whether all of li's ranges lie inside a single basic block,
// by binary-searching each range start in Idx2MBBMap and rejecting ranges
// that cross an MBB boundary (the final size check on MBBs and the early
// returns are among the elided lines in this chunk).
937 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
938 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
939 for (LiveInterval::Ranges::const_iterator
940 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
941 std::vector<IdxMBBPair>::const_iterator II =
942 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
943 if (II == Idx2MBBMap.end())
945 if (I->end > II->first) // crossing a MBB.
947 MBBs.insert(II->second);
954 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
955 /// interval on to-be re-materialized operands of MI) with new register.
// For every virtual-register operand of MI that was rematerialized, find the
// remat'ed instruction's use of li.reg and point it at NewVReg instead.
// (The vrm parameter line and some continue statements are elided here.)
956 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
957 MachineInstr *MI, unsigned NewVReg,
959 // There is an implicit use. That means one of the other operand is
960 // being remat'ed and the remat'ed instruction has li.reg as an
961 // use operand. Make sure we rewrite that as well.
962 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
963 MachineOperand &MO = MI->getOperand(i);
964 if (!MO.isRegister())
966 unsigned Reg = MO.getReg();
967 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
969 if (!vrm.isReMaterialized(Reg))
971 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
972 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
974 UseMO->setReg(NewVReg);
978 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
979 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// rewriteInstructionForSpills - Rewrite a single instruction that defines or
// uses the spilled interval li: replace the operand register with a (possibly
// new) virtual register NewVReg, try folding the load/store into the
// instruction, and create/extend the spill interval's live range for this
// def / use. Returns (via the visible CanFold computation) whether the memory
// operand could be folded later.
// NOTE(review): the embedded original line numbers are non-contiguous —
// guard statements (`continue` after the operand-kind checks, branch bodies,
// closing braces, the RestartInstruction label) were lost in extraction.
// Recover the missing lines from revision history before relying on this text.
981 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
982 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
983 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
984 unsigned Slot, int LdSlot,
985 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
987 const TargetRegisterClass* rc,
988 SmallVector<int, 4> &ReMatIds,
989 const MachineLoopInfo *loopInfo,
990 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
991 std::map<unsigned,unsigned> &MBBVRegsMap,
992 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
993 MachineBasicBlock *MBB = MI->getParent();
994 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
995 bool CanFold = false;
// Scan every operand of MI looking for references to the spilled register.
997 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
998 MachineOperand& mop = MI->getOperand(i);
999 if (!mop.isRegister())
1001 unsigned Reg = mop.getReg();
1002 unsigned RegI = Reg;
1003 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1008 bool TryFold = !DefIsReMat;
1009 bool FoldSS = true; // Default behavior unless it's a remat.
1010 int FoldSlot = Slot;
1012 // If this is the rematerializable definition MI itself and
1013 // all of its uses are rematerialized, simply delete it.
1014 if (MI == ReMatOrigDefMI && CanDelete) {
1015 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1017 RemoveMachineInstrFromMaps(MI);
1018 vrm.RemoveMachineInstrFromMaps(MI);
1019 MI->eraseFromParent();
1023 // If def for this use can't be rematerialized, then try folding.
1024 // If def is rematerializable and it's a load, also try folding.
1025 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1027 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1033 // Scan all of the operands of this instruction rewriting operands
1034 // to use NewVReg instead of li.reg as appropriate. We do this for
1037 // 1. If the instr reads the same spilled vreg multiple times, we
1038 // want to reuse the NewVReg.
1039 // 2. If the instr is a two-addr instruction, we are required to
1040 // keep the src/dst regs pinned.
1042 // Keep track of whether we replace a use and/or def so that we can
1043 // create the spill interval with the appropriate range.
1045 HasUse = mop.isUse();
1046 HasDef = mop.isDef();
// Ops collects the indices of *other* operands on this instruction that
// also reference the spilled register, so they are rewritten together.
1047 SmallVector<unsigned, 2> Ops;
1049 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1050 const MachineOperand &MOj = MI->getOperand(j);
1051 if (!MOj.isRegister())
1053 unsigned RegJ = MOj.getReg();
1054 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1058 HasUse |= MOj.isUse();
1059 HasDef |= MOj.isDef();
1063 // Update stack slot spill weight if we are splitting.
1064 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1071 // Do not fold load / store here if we are splitting. We'll find an
1072 // optimal point to insert a load / store later.
1074 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1075 Ops, FoldSS, FoldSlot, Reg)) {
1076 // Folding the load/store can completely change the instruction in
1077 // unpredictable ways, rescan it from the beginning.
1081 if (isRemoved(MI)) {
1085 goto RestartInstruction;
1088 // We'll try to fold it later if it's profitable.
1089 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1093 // Create a new virtual register for the spill interval.
1094 bool CreatedNewVReg = false;
1096 NewVReg = mri_->createVirtualRegister(rc);
1098 CreatedNewVReg = true;
// Rewrite this operand (and any implicit-use siblings) to the new vreg.
1100 mop.setReg(NewVReg);
1101 if (mop.isImplicit())
1102 rewriteImplicitOps(li, MI, NewVReg, vrm);
1104 // Reuse NewVReg for other reads.
1105 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1106 MachineOperand &mopj = MI->getOperand(Ops[j]);
1107 mopj.setReg(NewVReg);
1108 if (mopj.isImplicit())
1109 rewriteImplicitOps(li, MI, NewVReg, vrm);
1112 if (CreatedNewVReg) {
1114 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1115 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1116 // Each valnum may have its own remat id.
1117 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1119 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1121 if (!CanDelete || (HasUse && HasDef)) {
1122 // If this is a two-addr instruction then its use operands are
1123 // rematerializable but its def is not. It should be assigned a
1125 vrm.assignVirt2StackSlot(NewVReg, Slot);
1128 vrm.assignVirt2StackSlot(NewVReg, Slot);
1130 } else if (HasUse && HasDef &&
1131 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1132 // If this interval hasn't been assigned a stack slot (because earlier
1133 // def is a deleted remat def), do it now.
1134 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1135 vrm.assignVirt2StackSlot(NewVReg, Slot);
1138 // Re-matting an instruction with virtual register use. Add the
1139 // register as an implicit use on the use MI.
1140 if (DefIsReMat && ImpUse)
1141 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1143 // create a new register interval for this spill / remat.
1144 LiveInterval &nI = getOrCreateInterval(NewVReg);
1145 if (CreatedNewVReg) {
1146 NewLIs.push_back(&nI);
1147 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1149 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add the live range for this def / use to the new spill interval: a fresh
// load-to-use range for a new vreg, or an extension of the last range when
// reusing the split interval's vreg.
1153 if (CreatedNewVReg) {
1154 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1155 nI.getNextValue(~0U, 0, VNInfoAllocator));
1159 // Extend the split live interval to this def / use.
1160 unsigned End = getUseIndex(index)+1;
1161 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1162 nI.getValNumInfo(nI.getNumValNums()-1));
1168 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1169 nI.getNextValue(~0U, 0, VNInfoAllocator));
1174 DOUT << "\t\t\t\tAdded new interval: ";
1175 nI.print(DOUT, tri_);
1180 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1182 MachineBasicBlock *MBB, unsigned Idx) const {
1183 unsigned End = getMBBEndIdx(MBB);
1184 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1185 unsigned KillIdx = VNI->kills[j];
1186 if (KillIdx > Idx && KillIdx < End)
1192 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1193 /// during spilling.
1195 struct RewriteInfo {
1200 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1201 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1204 struct RewriteInfoCompare {
1205 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1206 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside the
// live range [I->start, I->end): collect them sorted by instruction index,
// rewrite each via rewriteInstructionForSpills, and record per-MBB spill
// (SpillIdxes/SpillMBBs) and restore (RestoreIdxes/RestoreMBBs) points for
// the splitting/spilling phase in addIntervalsForSpills.
// NOTE(review): the embedded original line numbers are non-contiguous — loop
// guards (`continue`), several branch bodies, and closing braces were lost in
// extraction. Recover the missing lines from revision history before relying
// on this text.
1211 void LiveIntervals::
1212 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1213 LiveInterval::Ranges::const_iterator &I,
1214 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1215 unsigned Slot, int LdSlot,
1216 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1218 const TargetRegisterClass* rc,
1219 SmallVector<int, 4> &ReMatIds,
1220 const MachineLoopInfo *loopInfo,
1221 BitVector &SpillMBBs,
1222 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1223 BitVector &RestoreMBBs,
1224 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1225 std::map<unsigned,unsigned> &MBBVRegsMap,
1226 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1227 bool AllCanFold = true;
1228 unsigned NewVReg = 0;
1229 unsigned start = getBaseIndex(I->start);
1230 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1232 // First collect all the def / use in this live range that will be rewritten.
1233 // Make sure they are sorted according to instruction index.
1234 std::vector<RewriteInfo> RewriteMIs;
1235 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1236 re = mri_->reg_end(); ri != re; ) {
1237 MachineInstr *MI = &*ri;
1238 MachineOperand &O = ri.getOperand();
1240 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1241 unsigned index = getInstructionIndex(MI);
1242 if (index < start || index >= end)
1244 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1246 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1248 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1249 // Now rewrite the defs and uses.
1250 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1251 RewriteInfo &rwi = RewriteMIs[i];
1253 unsigned index = rwi.Index;
1254 bool MIHasUse = rwi.HasUse;
1255 bool MIHasDef = rwi.HasDef;
1256 MachineInstr *MI = rwi.MI;
1257 // If MI def and/or use the same register multiple times, then there
1258 // are multiple entries.
1259 unsigned NumUses = MIHasUse;
// Collapse all entries for the same instruction into one rewrite.
1260 while (i != e && RewriteMIs[i].MI == MI) {
1261 assert(RewriteMIs[i].Index == index);
1262 bool isUse = RewriteMIs[i].HasUse;
1263 if (isUse) ++NumUses;
1265 MIHasDef |= RewriteMIs[i].HasDef;
1268 MachineBasicBlock *MBB = MI->getParent();
1270 if (ImpUse && MI != ReMatDefMI) {
1271 // Re-matting an instruction with virtual register use. Update the
1272 // register interval's spill weight to HUGE_VALF to prevent it from
1274 LiveInterval &ImpLi = getInterval(ImpUse);
1275 ImpLi.weight = HUGE_VALF;
// When splitting at MBB boundaries, reuse the per-MBB vreg recorded in
// MBBVRegsMap so all rewrites within one block share one new interval.
1278 unsigned MBBId = MBB->getNumber();
1279 unsigned ThisVReg = 0;
1281 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1282 if (NVI != MBBVRegsMap.end()) {
1283 ThisVReg = NVI->second;
1290 // It's better to start a new interval to avoid artifically
1291 // extend the new interval.
1292 if (MIHasDef && !MIHasUse) {
1293 MBBVRegsMap.erase(MBB->getNumber());
1299 bool IsNew = ThisVReg == 0;
1301 // This ends the previous live interval. If all of its def / use
1302 // can be folded, give it a low spill weight.
1303 if (NewVReg && TrySplit && AllCanFold) {
1304 LiveInterval &nI = getOrCreateInterval(NewVReg);
1311 bool HasDef = false;
1312 bool HasUse = false;
1313 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1314 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1315 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1316 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1317 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1318 if (!HasDef && !HasUse)
1321 AllCanFold &= CanFold;
1323 // Update weight of spill interval.
1324 LiveInterval &nI = getOrCreateInterval(NewVReg);
1326 // The spill weight is now infinity as it cannot be spilled again.
1327 nI.weight = HUGE_VALF;
1331 // Keep track of the last def and first use in each MBB.
1333 if (MI != ReMatOrigDefMI || !CanDelete) {
1334 bool HasKill = false;
1336 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1338 // If this is a two-address code, then this index starts a new VNInfo.
1339 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1341 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1343 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1344 SpillIdxes.find(MBBId);
1346 if (SII == SpillIdxes.end()) {
1347 std::vector<SRInfo> S;
1348 S.push_back(SRInfo(index, NewVReg, true));
1349 SpillIdxes.insert(std::make_pair(MBBId, S));
1350 } else if (SII->second.back().vreg != NewVReg) {
1351 SII->second.push_back(SRInfo(index, NewVReg, true));
1352 } else if ((int)index > SII->second.back().index) {
1353 // If there is an earlier def and this is a two-address
1354 // instruction, then it's not possible to fold the store (which
1355 // would also fold the load).
1356 SRInfo &Info = SII->second.back();
1358 Info.canFold = !HasUse;
1360 SpillMBBs.set(MBBId);
1361 } else if (SII != SpillIdxes.end() &&
1362 SII->second.back().vreg == NewVReg &&
1363 (int)index > SII->second.back().index) {
1364 // There is an earlier def that's not killed (must be two-address).
1365 // The spill is no longer needed.
1366 SII->second.pop_back();
1367 if (SII->second.empty()) {
1368 SpillIdxes.erase(MBBId);
1369 SpillMBBs.reset(MBBId);
// Use side: decide whether a reload (restore) is needed at this index and
// whether an existing spill/restore in this MBB is still foldable.
1376 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1377 SpillIdxes.find(MBBId);
1378 if (SII != SpillIdxes.end() &&
1379 SII->second.back().vreg == NewVReg &&
1380 (int)index > SII->second.back().index)
1381 // Use(s) following the last def, it's not safe to fold the spill.
1382 SII->second.back().canFold = false;
1383 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1384 RestoreIdxes.find(MBBId);
1385 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1386 // If we are splitting live intervals, only fold if it's the first
1387 // use and there isn't another use later in the MBB.
1388 RII->second.back().canFold = false;
1390 // Only need a reload if there isn't an earlier def / use.
1391 if (RII == RestoreIdxes.end()) {
1392 std::vector<SRInfo> Infos;
1393 Infos.push_back(SRInfo(index, NewVReg, true));
1394 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1396 RII->second.push_back(SRInfo(index, NewVReg, true));
1398 RestoreMBBs.set(MBBId);
1402 // Update spill weight.
1403 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1404 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1407 if (NewVReg && TrySplit && AllCanFold) {
1408 // If all of its def / use can be folded, give it a low spill weight.
1409 LiveInterval &nI = getOrCreateInterval(NewVReg);
1414 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1415 BitVector &RestoreMBBs,
1416 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1417 if (!RestoreMBBs[Id])
1419 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1420 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1421 if (Restores[i].index == index &&
1422 Restores[i].vreg == vr &&
1423 Restores[i].canFold)
1428 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1429 BitVector &RestoreMBBs,
1430 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1431 if (!RestoreMBBs[Id])
1433 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1434 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1435 if (Restores[i].index == index && Restores[i].vreg)
1436 Restores[i].index = -1;
1439 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1440 /// spilled and create empty intervals for their uses.
// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions that define the
// spilled register li.reg, and for remaining uses (which must stem from an
// implicit_def) create fresh empty intervals so the allocator can assign
// them without a reload.
// NOTE(review): extraction lost lines here (iterator advance `++ri`, the
// def/use branch, and closing braces — the embedded original line numbers
// skip). Recover the missing statements from revision history.
1442 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1443 const TargetRegisterClass* rc,
1444 std::vector<LiveInterval*> &NewLIs) {
1445 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1446 re = mri_->reg_end(); ri != re; ) {
1447 MachineOperand &O = ri.getOperand();
1448 MachineInstr *MI = &*ri;
// Def side: only IMPLICIT_DEF defs should remain un-rewritten; delete them.
1451 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1452 "Register def was not rewritten?");
1453 RemoveMachineInstrFromMaps(MI);
1454 vrm.RemoveMachineInstrFromMaps(MI);
1455 MI->eraseFromParent();
1457 // This must be an use of an implicit_def so it's not part of the live
1458 // interval. Create a new empty live interval for it.
1459 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1460 unsigned NewVReg = mri_->createVirtualRegister(rc);
1462 vrm.setIsImplicitlyDefined(NewVReg);
1463 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of MI that still references li.reg to the new vreg.
1464 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1465 MachineOperand &MO = MI->getOperand(i);
1466 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - The main spilling driver. Given a live interval to
// spill, it (1) handles the already-split-interval fast path, (2) computes
// which val#s are rematerializable, (3) assigns one stack slot if needed,
// (4) rewrites all defs / uses via rewriteInstructionsForSpills, (5) when
// splitting, inserts spill and restore points per MBB (folding stores/loads
// into instructions where possible), and (6) finalizes kills and spill
// weights of the new intervals it returns.
// NOTE(review): the embedded original line numbers are non-contiguous —
// branch bodies, `continue` statements, `return` statements, and closing
// braces were lost in extraction. Recover the missing lines from revision
// history before relying on this text.
1474 std::vector<LiveInterval*> LiveIntervals::
1475 addIntervalsForSpills(const LiveInterval &li,
1476 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1478 // Since this is called after the analysis is done we don't know if
1479 // LiveVariables is available
1480 lv_ = getAnalysisToUpdate<LiveVariables>();
1482 assert(li.weight != HUGE_VALF &&
1483 "attempt to spill already spilled interval!");
1485 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1486 li.print(DOUT, tri_);
1489 // Spill slot weight.
1492 // Each bit specify whether it a spill is required in the MBB.
1493 BitVector SpillMBBs(mf_->getNumBlockIDs());
1494 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1495 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1496 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1497 std::map<unsigned,unsigned> MBBVRegsMap;
1498 std::vector<LiveInterval*> NewLIs;
1499 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
1501 unsigned NumValNums = li.getNumValNums();
1502 SmallVector<MachineInstr*, 4> ReMatDefs;
1503 ReMatDefs.resize(NumValNums, NULL);
1504 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1505 ReMatOrigDefs.resize(NumValNums, NULL);
1506 SmallVector<int, 4> ReMatIds;
1507 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1508 BitVector ReMatDelete(NumValNums);
1509 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1511 // Spilling a split live interval. It cannot be split any further. Also,
1512 // it's also guaranteed to be a single val# / range interval.
1513 if (vrm.getPreSplitReg(li.reg)) {
1514 vrm.setIsSplitFromReg(li.reg, 0);
1515 // Unset the split kill marker on the last use.
1516 unsigned KillIdx = vrm.getKillPoint(li.reg);
1518 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1519 assert(KillMI && "Last use disappeared?");
1520 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1521 assert(KillOp != -1 && "Last use disappeared?");
1522 KillMI->getOperand(KillOp).setIsKill(false);
1524 vrm.removeKillPoint(li.reg);
1525 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1526 Slot = vrm.getStackSlot(li.reg);
1527 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1528 MachineInstr *ReMatDefMI = DefIsReMat ?
1529 vrm.getReMaterializedMI(li.reg) : NULL;
1531 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1532 bool isLoad = isLoadSS ||
1533 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1534 bool IsFirstRange = true;
1535 for (LiveInterval::Ranges::const_iterator
1536 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1537 // If this is a split live interval with multiple ranges, it means there
1538 // are two-address instructions that re-defined the value. Only the
1539 // first def can be rematerialized!
1541 // Note ReMatOrigDefMI has already been deleted.
1542 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1543 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1544 false, vrm, rc, ReMatIds, loopInfo,
1545 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1546 MBBVRegsMap, NewLIs, SSWeight);
1548 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1549 Slot, 0, false, false, false,
1550 false, vrm, rc, ReMatIds, loopInfo,
1551 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1552 MBBVRegsMap, NewLIs, SSWeight);
1554 IsFirstRange = false;
1557 SSWeight = 0.0f; // Already accounted for when split.
1558 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Normal (non-pre-split) path starts here.
1562 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1563 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1567 bool NeedStackSlot = false;
1568 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1570 const VNInfo *VNI = *i;
1571 unsigned VN = VNI->id;
1572 unsigned DefIdx = VNI->def;
1574 continue; // Dead val#.
1575 // Is the def for the val# rematerializable?
1576 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1577 ? 0 : getInstructionFromIndex(DefIdx);
1579 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1580 // Remember how to remat the def of this val#.
1581 ReMatOrigDefs[VN] = ReMatDefMI;
1582 // Original def may be modified so we have to make a copy here. vrm must
1584 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1586 bool CanDelete = true;
1587 if (VNI->hasPHIKill) {
1588 // A kill is a phi node, not all of its uses can be rematerialized.
1589 // It must not be deleted.
1591 // Need a stack slot if there is any live range where uses cannot be
1593 NeedStackSlot = true;
1596 ReMatDelete.set(VN);
1598 // Need a stack slot if there is any live range where uses cannot be
1600 NeedStackSlot = true;
1604 // One stack slot per live interval.
1605 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1606 Slot = vrm.assignVirt2StackSlot(li.reg);
1608 // Create new intervals and rewrite defs and uses.
1609 for (LiveInterval::Ranges::const_iterator
1610 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1611 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1612 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1613 bool DefIsReMat = ReMatDefMI != NULL;
1614 bool CanDelete = ReMatDelete[I->valno->id];
1616 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1617 bool isLoad = isLoadSS ||
1618 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1619 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1620 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1621 CanDelete, vrm, rc, ReMatIds, loopInfo,
1622 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1623 MBBVRegsMap, NewLIs, SSWeight);
1626 // Insert spills / restores if we are splitting.
1628 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1632 SmallPtrSet<LiveInterval*, 4> AddedKill;
1633 SmallVector<unsigned, 2> Ops;
// Emit a spill (store) point after the last def of each spilled MBB,
// folding the store into the defining instruction when possible.
1634 if (NeedStackSlot) {
1635 int Id = SpillMBBs.find_first();
1637 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1638 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1639 std::vector<SRInfo> &spills = SpillIdxes[Id];
1640 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1641 int index = spills[i].index;
1642 unsigned VReg = spills[i].vreg;
1643 LiveInterval &nI = getOrCreateInterval(VReg);
1644 bool isReMat = vrm.isReMaterialized(VReg);
1645 MachineInstr *MI = getInstructionFromIndex(index);
1646 bool CanFold = false;
1647 bool FoundUse = false;
1649 if (spills[i].canFold) {
1651 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1652 MachineOperand &MO = MI->getOperand(j);
1653 if (!MO.isRegister() || MO.getReg() != VReg)
1660 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1661 RestoreMBBs, RestoreIdxes))) {
1662 // MI has two-address uses of the same register. If the use
1663 // isn't the first and only use in the BB, then we can't fold
1664 // it. FIXME: Move this to rewriteInstructionsForSpills.
1671 // Fold the store into the def if possible.
1672 bool Folded = false;
1673 if (CanFold && !Ops.empty()) {
1674 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1677 // Also folded uses, do not issue a load.
1678 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1679 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1681 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1685 // Otherwise tell the spiller to issue a spill.
1687 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1688 bool isKill = LR->end == getStoreIndex(index);
1689 if (!MI->registerDefIsDead(nI.reg))
1690 // No need to spill a dead def.
1691 vrm.addSpillPoint(VReg, isKill, MI);
1693 AddedKill.insert(&nI);
1696 // Update spill slot weight.
1698 SSWeight += getSpillWeight(true, false, loopDepth);
1700 Id = SpillMBBs.find_next(Id);
// Emit a restore (load / remat) point before the first use of each
// restored MBB, folding the load into the using instruction when possible.
1704 int Id = RestoreMBBs.find_first();
1706 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1707 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1709 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1710 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1711 int index = restores[i].index;
1714 unsigned VReg = restores[i].vreg;
1715 LiveInterval &nI = getOrCreateInterval(VReg);
1716 bool isReMat = vrm.isReMaterialized(VReg);
1717 MachineInstr *MI = getInstructionFromIndex(index);
1718 bool CanFold = false;
1720 if (restores[i].canFold) {
1722 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1723 MachineOperand &MO = MI->getOperand(j);
1724 if (!MO.isRegister() || MO.getReg() != VReg)
1728 // If this restore were to be folded, it would have been folded
1737 // Fold the load into the use if possible.
1738 bool Folded = false;
1739 if (CanFold && !Ops.empty()) {
1741 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1743 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1745 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1746 // If the rematerializable def is a load, also try to fold it.
1747 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1748 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1749 Ops, isLoadSS, LdSlot, VReg);
1750 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1752 // Re-matting an instruction with virtual register use. Add the
1753 // register as an implicit use on the use MI and update the register
1754 // interval's spill weight to HUGE_VALF to prevent it from being
1756 LiveInterval &ImpLi = getInterval(ImpUse);
1757 ImpLi.weight = HUGE_VALF;
1758 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1762 // If folding is not possible / failed, then tell the spiller to issue a
1763 // load / rematerialization for us.
1765 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1767 vrm.addRestorePoint(VReg, MI);
1769 // Update spill slot weight.
1771 SSWeight += getSpillWeight(false, true, loopDepth);
1773 Id = RestoreMBBs.find_next(Id);
1776 // Finalize intervals: add kills, finalize spill weights, and filter out
1778 std::vector<LiveInterval*> RetNewLIs;
1779 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1780 LiveInterval *LI = NewLIs[i];
1782 LI->weight /= LI->getSize();
1783 if (!AddedKill.count(LI)) {
1784 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1785 unsigned LastUseIdx = getBaseIndex(LR->end);
1786 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1787 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1788 assert(UseIdx != -1);
1789 if (LastUse->getOperand(UseIdx).isImplicit() ||
1790 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1791 LastUse->getOperand(UseIdx).setIsKill();
1792 vrm.addKillPoint(LI->reg, LastUseIdx);
1795 RetNewLIs.push_back(LI);
1799 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1803 /// hasAllocatableSuperReg - Return true if the specified physical register has
1804 /// any super register that's allocatable.
1805 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1806 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1807 if (allocatableRegs_[*AS] && hasInterval(*AS))
1812 /// getRepresentativeReg - Find the largest super register of the specified
1813 /// physical register.
1814 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1815 // Find the largest super-register that is allocatable.
1816 unsigned BestReg = Reg;
1817 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1818 unsigned SuperReg = *AS;
1819 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1827 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1828 /// specified interval that conflicts with the specified physical register.
1829 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1830 unsigned PhysReg) const {
1831 unsigned NumConflicts = 0;
1832 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1833 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1834 E = mri_->reg_end(); I != E; ++I) {
1835 MachineOperand &O = I.getOperand();
1836 MachineInstr *MI = O.getParent();
1837 unsigned Index = getInstructionIndex(MI);
1838 if (pli.liveAt(Index))
1841 return NumConflicts;
1844 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1845 /// around all defs and uses of the specified interval.
1846 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1847 unsigned PhysReg, VirtRegMap &vrm) {
1848 unsigned SpillReg = getRepresentativeReg(PhysReg);
1850 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1851 // If there are registers which alias PhysReg, but which are not a
1852 // sub-register of the chosen representative super register. Assert
1853 // since we can't handle it yet.
1854 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1855 tri_->isSuperRegister(*AS, SpillReg));
1857 LiveInterval &pli = getInterval(SpillReg);
1858 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1859 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1860 E = mri_->reg_end(); I != E; ++I) {
1861 MachineOperand &O = I.getOperand();
1862 MachineInstr *MI = O.getParent();
1863 if (SeenMIs.count(MI))
1866 unsigned Index = getInstructionIndex(MI);
1867 if (pli.liveAt(Index)) {
1868 vrm.addEmergencySpill(SpillReg, MI);
1869 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1870 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1871 if (!hasInterval(*AS))
1873 LiveInterval &spli = getInterval(*AS);
1874 if (spli.liveAt(Index))
1875 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1881 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1882 MachineInstr* startInst) {
1883 LiveInterval& Interval = getOrCreateInterval(reg);
1884 VNInfo* VN = Interval.getNextValue(
1885 getInstructionIndex(startInst) + InstrSlots::DEF,
1886 startInst, getVNInfoAllocator());
1887 VN->hasPHIKill = true;
1888 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1889 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1890 getMBBEndIdx(startInst->getParent()) + 1, VN);
1891 Interval.addRange(LR);