1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
39 // Hidden options for help debugging.
// Disable rematerialization of trivially-recomputable values during spilling.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// When splitting intervals, allow splitting at basic-block boundaries (on by default).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
// Cap on the number of interval splits; -1 means unlimited.
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats).
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and static registration with the pass registry.
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare the analyses this pass requires and preserves. LiveVariables is both
// required (used to compute intervals) and preserved. PHI elimination and
// two-address conversion must run first so intervals are computed on the
// post-SSA machine code.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
// Chain to the base class so common MachineFunctionPass requirements are added.
64 MachineFunctionPass::getAnalysisUsage(AU);
// Free all per-function state: the VNInfo allocator and any machine
// instructions cloned (e.g. for rematerialization) that this pass owns.
67 void LiveIntervals::releaseMemory() {
73 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
74 VNInfoAllocator.Reset();
// Delete owned clones; DeleteMachineInstr returns them to the function's pool.
75 while (!ClonedMIs.empty()) {
76 MachineInstr *MI = ClonedMIs.back();
78 mf_->DeleteMachineInstr(MI);
// (Re)number every MachineInstr and MachineBasicBlock in the function,
// assigning InstrSlots::NUM index slots per instruction, then remap all
// existing live-range endpoints, VNInfo defs, and kill indices from the old
// numbering to the new one. Used after instructions are inserted/erased.
82 void LiveIntervals::computeNumbering() {
// Keep the previous index->instruction map so old indices can be translated.
83 Index2MiMap OldI2MI = i2miMap_;
92 // Number MachineInstrs and MachineBasicBlocks.
93 // Initialize MBB indexes to a sentinel.
94 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U))
97 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
99 unsigned StartIdx = MIIndex;
101 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
103 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
104 assert(inserted && "multiple MachineInstr -> index mappings");
105 i2miMap_.push_back(I);
106 MIIndex += InstrSlots::NUM;
// An empty MBB still consumes one instruction's worth of index space so it
// has a distinct [start,end) range; the null entry marks the placeholder.
111 if (StartIdx == MIIndex) {
113 MIIndex += InstrSlots::NUM;
114 i2miMap_.push_back(0);
116 // Set the MBB2IdxMap entry for this MBB.
117 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
118 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
120 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was a previous numbering, rewrite every live range of every
// interval from old indices to new ones.
// NOTE(review): several source lines are elided in this listing (loop
// bounds, else-branches); the remapping steps below are documented from
// the visible statements only.
122 if (!OldI2MI.empty())
123 for (iterator I = begin(), E = end(); I != E; ++I)
124 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
127 // Remap the start index of the live range to the corresponding new
128 // number, or our best guess at what it _should_ correspond to if the
129 // original instruction has been erased. This is either the following
130 // instruction or its predecessor.
131 unsigned offset = LI->start % InstrSlots::NUM;
132 if (OldI2MI[LI->start / InstrSlots::NUM])
133 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
// Erased instruction: scan forward for the next surviving instruction.
136 MachineInstr* newInstr = 0;
138 newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
// If the survivor is the first instruction of its block, start there;
// otherwise back up one slot group and reapply the intra-slot offset.
142 if (mi2iMap_[newInstr] ==
143 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
144 LI->start = mi2iMap_[newInstr];
146 LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
149 // Remap the ending index in the same way that we remapped the start,
150 // except for the final step where we always map to the immediately
151 // following instruction.
152 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
153 offset = LI->end % InstrSlots::NUM;
154 if (OldI2MI[LI->end / InstrSlots::NUM])
155 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
158 MachineInstr* newInstr = 0;
160 newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
164 LI->end = mi2iMap_[newInstr];
// End index was past the old numbering: clamp to the end of the new one.
167 LI->end = i2miMap_.size() * InstrSlots::NUM;
170 // Remap the VNInfo def index, which works the same as the
171 // start indices above.
172 VNInfo* vni = LI->valno;
173 offset = vni->def % InstrSlots::NUM;
174 if (OldI2MI[vni->def / InstrSlots::NUM])
175 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
178 MachineInstr* newInstr = 0;
180 newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
184 if (mi2iMap_[newInstr] ==
185 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
186 vni->def = mi2iMap_[newInstr];
188 vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
191 // Remap the VNInfo kill indices, which works the same as
192 // the end indices above.
193 for (size_t i = 0; i < vni->kills.size(); ++i) {
194 offset = vni->kills[i] % InstrSlots::NUM;
195 if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
196 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
200 MachineInstr* newInstr = 0;
202 newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
206 vni->kills[i] = mi2iMap_[newInstr];
212 /// runOnMachineFunction - Register allocate the whole function
// Pass entry point: caches target/register/instruction info and the
// LiveVariables result, then computes intervals (elided lines presumably call
// computeNumbering()/computeIntervals() -- TODO confirm against full source).
// Returns true per the visible signature; the return statement is not in view.
214 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
216 mri_ = &mf_->getRegInfo();
217 tm_ = &fn.getTarget();
218 tri_ = tm_->getRegisterInfo();
219 tii_ = tm_->getInstrInfo();
220 lv_ = &getAnalysis<LiveVariables>();
221 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update -stats counters before and after the (elided) coalescing step.
226 numIntervals += getNumIntervals();
// Debug dump of all computed intervals.
228 DOUT << "********** INTERVALS **********\n";
229 for (iterator I = begin(), E = end(); I != E; ++I) {
230 I->second.print(DOUT, tri_);
234 numIntervalsAfter += getNumIntervals();
239 /// print - Implement the dump method.
// Prints every live interval, then every machine instruction prefixed with
// its slot index, to the given stream. The Module parameter is unused.
240 void LiveIntervals::print(std::ostream &O, const Module* ) const {
241 O << "********** INTERVALS **********\n";
242 for (const_iterator I = begin(), E = end(); I != E; ++I) {
243 I->second.print(O, tri_);
247 O << "********** MACHINEINSTRS **********\n";
248 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
249 mbbi != mbbe; ++mbbi) {
// Label each block with the name of its originating IR basic block.
250 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
251 for (MachineBasicBlock::iterator mii = mbbi->begin(),
252 mie = mbbi->end(); mii != mie; ++mii) {
253 O << getInstructionIndex(mii) << '\t' << *mii;
258 /// conflictsWithPhysRegDef - Returns true if the specified register
259 /// is defined during the duration of the specified interval.
260 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
261 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by each range of the interval.
262 for (LiveInterval::Ranges::const_iterator
263 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
264 for (unsigned index = getBaseIndex(I->start),
265 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
266 index += InstrSlots::NUM) {
267 // skip deleted instructions
268 while (index != end && !getInstructionFromIndex(index))
269 index += InstrSlots::NUM;
270 if (index == end) break;
272 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from the interval's own register are exempt from the check
// (the elided line presumably continues past them -- TODO confirm).
273 unsigned SrcReg, DstReg;
274 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
275 if (SrcReg == li.reg || DstReg == li.reg)
// Examine every register operand; map virtual registers through the
// VirtRegMap to their assigned physreg before the overlap test.
277 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
278 MachineOperand& mop = MI->getOperand(i);
279 if (!mop.isRegister())
281 unsigned PhysReg = mop.getReg();
282 if (PhysReg == 0 || PhysReg == li.reg)
284 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
285 if (!vrm.hasPhys(PhysReg))
287 PhysReg = vrm.getPhys(PhysReg);
// Conflict if the operand's physreg aliases the queried register.
289 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// Debug helper: print a physical register's target name, or "%regN" for a
// virtual register, to stderr.
298 void LiveIntervals::printRegName(unsigned reg) const {
299 if (TargetRegisterInfo::isPhysicalRegister(reg))
300 cerr << tri_->getName(reg);
302 cerr << "%reg" << reg;
// Extend the live interval of a virtual register for the definition at
// (mi, MIIdx). Handles three cases: the first definition of the vreg, a
// two-address redefinition, and a phi-elimination redefinition.
// NOTE(review): a number of source lines are elided in this listing (e.g.
// else-branches and some closing braces); comments below describe only the
// visible statements.
305 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
306 MachineBasicBlock::iterator mi,
307 unsigned MIIdx, MachineOperand& MO,
309 LiveInterval &interval) {
310 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
311 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
// Implicit defs get special handling (the branch body is elided here).
313 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
314 DOUT << "is a implicit_def\n";
318 // Virtual registers may be defined multiple times (due to phi
319 // elimination and 2-addr elimination). Much of what we do only has to be
320 // done once for the vreg. We use an empty interval to detect the first
321 // time we see a vreg.
322 if (interval.empty()) {
323 // Get the Idx of the defining instructions.
324 unsigned defIndex = getDefIndex(MIIdx);
// Record the copy instruction (subreg ops count as copies) on the val#
// so coalescing can later identify its source.
326 MachineInstr *CopyMI = NULL;
327 unsigned SrcReg, DstReg;
328 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
329 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
330 tii_->isMoveInstr(*mi, SrcReg, DstReg))
332 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
334 assert(ValNo->id == 0 && "First value in interval is not 0?");
336 // Loop over all of the blocks that the vreg is defined in. There are
337 // two cases we have to handle here. The most common case is a vreg
338 // whose lifetime is contained within a basic block. In this case there
339 // will be a single kill, in MBB, which comes after the definition.
340 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
341 // FIXME: what about dead vars?
343 if (vi.Kills[0] != mi)
344 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
346 killIdx = defIndex+1;
348 // If the kill happens after the definition, we have an intra-block
350 if (killIdx > defIndex) {
351 assert(vi.AliveBlocks.none() &&
352 "Shouldn't be alive across any blocks!");
353 LiveRange LR(defIndex, killIdx, ValNo);
354 interval.addRange(LR);
355 DOUT << " +" << LR << "\n";
356 interval.addKill(ValNo, killIdx);
361 // The other case we handle is when a virtual register lives to the end
362 // of the defining block, potentially live across some blocks, then is
363 // live into some number of blocks, but gets killed. Start by adding a
364 // range that goes from this definition to the end of the defining block.
365 LiveRange NewLR(defIndex,
366 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
368 DOUT << " +" << NewLR;
369 interval.addRange(NewLR);
371 // Iterate over all of the blocks that the variable is completely
372 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
374 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
375 if (vi.AliveBlocks[i]) {
376 LiveRange LR(getMBBStartIdx(i),
377 getMBBEndIdx(i)+1, // MBB ends at -1.
379 interval.addRange(LR);
384 // Finally, this virtual register is live from the start of any killing
385 // block to the 'use' slot of the killing instruction.
386 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
387 MachineInstr *Kill = vi.Kills[i];
388 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
389 LiveRange LR(getMBBStartIdx(Kill->getParent()),
391 interval.addRange(LR);
392 interval.addKill(ValNo, killIdx);
397 // If this is the second time we see a virtual register definition, it
398 // must be due to phi elimination or two addr elimination. If this is
399 // the result of two address elimination, then the vreg is one of the
400 // def-and-use register operand.
401 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
402 // If this is a two-address definition, then we have already processed
403 // the live range. The only problem is that we didn't realize there
404 // are actually two values in the live interval. Because of this we
405 // need to take the LiveRegion that defines this register and split it
407 assert(interval.containsOneValue());
408 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
409 unsigned RedefIndex = getDefIndex(MIIdx);
411 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
412 VNInfo *OldValNo = OldLR->valno;
414 // Delete the initial value, which should be short and continuous,
415 // because the 2-addr copy must be in the same MBB as the redef.
416 interval.removeRange(DefIndex, RedefIndex);
418 // Two-address vregs should always only be redefined once. This means
419 // that at this point, there should be exactly one value number in it.
420 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
422 // The new value number (#1) is defined by the instruction we claimed
424 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
427 // Value#0 is now defined by the 2-addr instruction.
428 OldValNo->def = RedefIndex;
431 // Add the new live interval which replaces the range for the input copy.
432 LiveRange LR(DefIndex, RedefIndex, ValNo);
433 DOUT << " replace range with " << LR;
434 interval.addRange(LR);
435 interval.addKill(ValNo, RedefIndex);
437 // If this redefinition is dead, we need to add a dummy unit live
438 // range covering the def slot.
440 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
443 interval.print(DOUT, tri_);
446 // Otherwise, this must be because of phi elimination. If this is the
447 // first redefinition of the vreg that we have seen, go back and change
448 // the live range in the PHI block to be a different value number.
449 if (interval.containsOneValue()) {
450 assert(vi.Kills.size() == 1 &&
451 "PHI elimination vreg should have one kill, the PHI itself!");
453 // Remove the old range that we now know has an incorrect number.
454 VNInfo *VNI = interval.getValNumInfo(0);
455 MachineInstr *Killer = vi.Kills[0];
456 unsigned Start = getMBBStartIdx(Killer->getParent());
457 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
458 DOUT << " Removing [" << Start << "," << End << "] from: ";
459 interval.print(DOUT, tri_); DOUT << "\n";
460 interval.removeRange(Start, End);
461 VNI->hasPHIKill = true;
462 DOUT << " RESULT: "; interval.print(DOUT, tri_);
464 // Replace the interval with one of a NEW value number. Note that this
465 // value number isn't actually defined by an instruction, weird huh? :)
466 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
467 DOUT << " replace range with " << LR;
468 interval.addRange(LR);
469 interval.addKill(LR.valno, End);
470 DOUT << " RESULT: "; interval.print(DOUT, tri_);
473 // In the case of PHI elimination, each variable definition is only
474 // live until the end of the block. We've already taken care of the
475 // rest of the live range.
476 unsigned defIndex = getDefIndex(MIIdx);
// As above, remember the copy instruction for this new value number.
479 MachineInstr *CopyMI = NULL;
480 unsigned SrcReg, DstReg;
481 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
482 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
483 tii_->isMoveInstr(*mi, SrcReg, DstReg))
485 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// The phi-redef value lives from its def to the end of this block.
487 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
488 LiveRange LR(defIndex, killIndex, ValNo);
489 interval.addRange(LR);
490 interval.addKill(ValNo, killIndex);
491 ValNo->hasPHIKill = true;
// Compute the (intra-block) live range for a physical register defined at
// (mi, MIIdx) and add it to its interval. CopyMI, if non-null, is the copy
// instruction that produced the value (recorded on new value numbers).
499 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
500 MachineBasicBlock::iterator mi,
503 LiveInterval &interval,
504 MachineInstr *CopyMI) {
505 // A physical register cannot be live across basic blocks, so its
506 // lifetime must end somewhere in its defining basic block.
507 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
509 unsigned baseIndex = MIIdx;
510 unsigned start = getDefIndex(baseIndex);
511 unsigned end = start;
513 // If it is not used after definition, it is considered dead at
514 // the instruction defining it. Hence its interval is:
515 // [defSlot(def), defSlot(def)+1)
518 end = getDefIndex(start) + 1;
522 // If it is not dead on definition, it must be killed by a
523 // subsequent instruction. Hence its interval is:
524 // [defSlot(def), useSlot(kill)+1)
525 while (++mi != MBB->end()) {
526 baseIndex += InstrSlots::NUM;
527 if (mi->killsRegister(interval.reg, tri_)) {
529 end = getUseIndex(baseIndex) + 1;
531 } else if (mi->modifiesRegister(interval.reg, tri_)) {
532 // Another instruction redefines the register before it is ever read.
533 // Then the register is essentially dead at the instruction that defines
534 // it. Hence its interval is:
535 // [defSlot(def), defSlot(def)+1)
537 end = getDefIndex(start) + 1;
542 // The only case we should have a dead physreg here without a killing or
543 // instruction where we know it's dead is if it is live-in to the function
545 assert(!CopyMI && "physreg was not killed in defining block!");
546 end = getDefIndex(start) + 1; // It's dead.
549 assert(start < end && "did not find end of interval?");
551 // Already exists? Extend old live interval.
552 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
553 VNInfo *ValNo = (OldLR != interval.end())
554 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
555 LiveRange LR(start, end, ValNo);
556 interval.addRange(LR);
557 interval.addKill(LR.valno, end);
558 DOUT << " +" << LR << '\n';
// Dispatch a register definition: virtual registers go to
// handleVirtualRegisterDef; allocatable physical registers (and all of their
// sub-registers) go to handlePhysicalRegisterDef.
561 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
562 MachineBasicBlock::iterator MI,
566 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
567 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
568 getOrCreateInterval(MO.getReg()));
569 else if (allocatableRegs_[MO.getReg()]) {
// Identify copies (subreg ops included) so the val# records its origin.
570 MachineInstr *CopyMI = NULL;
571 unsigned SrcReg, DstReg;
572 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
573 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
574 tii_->isMoveInstr(*MI, SrcReg, DstReg))
576 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
577 getOrCreateInterval(MO.getReg()), CopyMI);
578 // Def of a register also defines its sub-registers.
579 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
580 // If MI also modifies the sub-register explicitly, avoid processing it
581 // more than once. Do not pass in TRI here so it checks for exact match.
582 if (!MI->modifiesRegister(*AS))
583 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
584 getOrCreateInterval(*AS), 0);
// Create a live range for a register that is live-in to MBB, scanning forward
// from the block start for a kill or a redefinition; if neither is found the
// register is treated as live through (or dead, per the elided branches).
// isAlias marks intervals created for sub-register aliases of a live-in.
588 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
590 LiveInterval &interval, bool isAlias) {
591 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
593 // Look for kills, if it reaches a def before it's killed, then it shouldn't
594 // be considered a livein.
595 MachineBasicBlock::iterator mi = MBB->begin();
596 unsigned baseIndex = MIIdx;
597 unsigned start = baseIndex;
598 unsigned end = start;
599 while (mi != MBB->end()) {
600 if (mi->killsRegister(interval.reg, tri_)) {
602 end = getUseIndex(baseIndex) + 1;
604 } else if (mi->modifiesRegister(interval.reg, tri_)) {
605 // Another instruction redefines the register before it is ever read.
606 // Then the register is essentially dead at the instruction that defines
607 // it. Hence its interval is:
608 // [defSlot(def), defSlot(def)+1)
610 end = getDefIndex(start) + 1;
614 baseIndex += InstrSlots::NUM;
619 // Live-in register might not be used at all.
623 end = getDefIndex(MIIdx) + 1;
625 DOUT << " live through";
// Record the computed range with a fresh value number (no defining copy).
630 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
631 interval.addRange(LR);
632 interval.addKill(LR.valno, end);
633 DOUT << " +" << LR << '\n';
636 /// computeIntervals - computes the live intervals for virtual
637 /// registers. for some ordering of the machine instructions [1,N] a
638 /// live interval is an interval [i, j) where 1 <= i <= j < N for
639 /// which a variable is live
640 void LiveIntervals::computeIntervals() {
641 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
642 << "********** Function: "
643 << ((Value*)mf_->getFunction())->getName() << '\n';
644 // Track the index of the current machine instr.
645 unsigned MIIndex = 0;
646 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
648 MachineBasicBlock *MBB = MBBI;
649 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
651 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
653 // Create intervals for live-ins to this BB first.
654 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
655 LE = MBB->livein_end(); LI != LE; ++LI) {
656 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
657 // Multiple live-ins can alias the same register.
658 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
659 if (!hasInterval(*AS))
660 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Now walk the block's instructions, processing each def operand.
664 for (; MI != miEnd; ++MI) {
665 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse (defs are handled regardless of position).
668 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
669 MachineOperand &MO = MI->getOperand(i);
670 // handle register defs - build intervals
671 if (MO.isRegister() && MO.getReg() && MO.isDef())
672 handleRegisterDef(MBB, MI, MIIndex, MO, i);
675 MIIndex += InstrSlots::NUM;
// Keep index accounting consistent with computeNumbering's empty-MBB slot.
678 if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
// Collect into MBBs every basic block whose start index falls inside the
// given live range [LR.start, LR.end), using the sorted Idx2MBBMap.
// Return value line is elided; presumably reports whether any were found.
682 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
683 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
// Binary-search for the first block starting at or after LR.start.
684 std::vector<IdxMBBPair>::const_iterator I =
685 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
688 while (I != Idx2MBBMap.end()) {
689 if (LR.end <= I->first)
691 MBBs.push_back(I->second);
// Construct a fresh LiveInterval for the given register; physical registers
// get a different initial spill weight than virtual ones (the physreg weight
// expression is on an elided line -- TODO confirm against full source).
699 LiveInterval LiveIntervals::createInterval(unsigned reg) {
700 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
702 return LiveInterval(reg, Weight);
705 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
706 /// copy field and returns the source register that defines it.
707 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG: operand 1 is the source super-register.
711 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
712 return VNI->copy->getOperand(1).getReg();
// INSERT_SUBREG: operand 2 is the inserted (source) value.
713 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
714 return VNI->copy->getOperand(2).getReg();
// Plain register moves: the target hook extracts the source register.
715 unsigned SrcReg, DstReg;
716 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
718 assert(0 && "Unrecognized copy instruction!");
722 //===----------------------------------------------------------------------===//
723 // Register allocator hooks.
726 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
727 /// allow one) virtual register operand, then its uses are implicitly using
728 /// the register. Returns the virtual register.
729 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
730 MachineInstr *MI) const {
// Scan use operands for a register other than li.reg itself.
732 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
733 MachineOperand &MO = MI->getOperand(i);
734 if (!MO.isRegister() || !MO.isUse())
736 unsigned Reg = MO.getReg();
737 if (Reg == 0 || Reg == li.reg)
739 // FIXME: For now, only remat MI with at most one register operand.
// (Assertion message; the assert's condition line is elided in this view.)
741 "Can't rematerialize instruction with multiple register operand!");
748 /// isValNoAvailableAt - Return true if the val# of the specified interval
749 /// which reaches the given instruction also reaches the specified use index.
750 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
751 unsigned UseIdx) const {
// Value number live at MI's own index ...
752 unsigned Index = getInstructionIndex(MI);
753 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// ... must also be the value number live at UseIdx.
754 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
755 return UI != li.end() && UI->valno == ValNo;
758 /// isReMaterializable - Returns true if the definition MI of the specified
759 /// val# of the specified interval is re-materializable.
760 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
761 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable (its handling line is elided).
767 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from immutable stack objects can always be re-executed.
771 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
772 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
773 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
774 // this but remember this is not safe to fold into a two-address
776 // This is a load from fixed stack slot. It can be rematerialized.
779 if (tii_->isTriviallyReMaterializable(MI)) {
780 const TargetInstrDesc &TID = MI->getDesc();
781 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another virtual register, every use of
// li.reg reached by this val# must also be reached by that register's value.
783 unsigned ImpUse = getReMatImplicitUse(li, MI);
785 const LiveInterval &ImpLi = getInterval(ImpUse);
786 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
787 re = mri_->use_end(); ri != re; ++ri) {
788 MachineInstr *UseMI = &*ri;
789 unsigned UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this specific value number matter.
790 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
792 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
802 /// isReMaterializable - Returns true if every definition of MI of every
803 /// val# of the specified interval is re-materializable.
804 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Check each value number's defining instruction in turn.
806 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
808 const VNInfo *VNI = *i;
809 unsigned DefIdx = VNI->def;
811 continue; // Dead val#.
812 // Is the def for the val# rematerializable?
815 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
816 bool DefIsLoad = false;
// Delegate to the per-val# overload; any non-remat def fails the whole query.
818 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
825 /// FilterFoldedOps - Filter out two-address use operands. Return
826 /// true if it finds any issue with the operands that ought to prevent
// (folding -- continuation of the sentence is on an elided line.)
// Out-params: MRInfo accumulates VirtRegMap mod/ref flags; FoldOps receives
// the operand indexes that survived filtering.
828 static bool FilterFoldedOps(MachineInstr *MI,
829 SmallVector<unsigned, 2> &Ops,
831 SmallVector<unsigned, 2> &FoldOps) {
832 const TargetInstrDesc &TID = MI->getDesc();
835 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
836 unsigned OpIdx = Ops[i];
837 MachineOperand &MO = MI->getOperand(OpIdx);
838 // FIXME: fold subreg use.
// Defs mark the access as a modification.
842 MRInfo |= (unsigned)VirtRegMap::isMod;
844 // Filter out two-address use operand(s).
845 if (!MO.isImplicit() &&
846 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// A tied use implies the instruction both reads and writes the value.
847 MRInfo = VirtRegMap::isModRef;
850 MRInfo |= (unsigned)VirtRegMap::isRef;
852 FoldOps.push_back(OpIdx);
858 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
859 /// slot / to reg or any rematerialized load into ith operand of specified
860 /// MI. If it is successful, MI is updated with the newly created MI and
// (the remainder of the doc sentence is on an elided line.)
862 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
863 VirtRegMap &vrm, MachineInstr *DefMI,
865 SmallVector<unsigned, 2> &Ops,
866 bool isSS, int Slot, unsigned Reg) {
867 // If it is an implicit def instruction, just delete it.
868 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
869 RemoveMachineInstrFromMaps(MI);
870 vrm.RemoveMachineInstrFromMaps(MI);
871 MI->eraseFromParent();
876 // Filter the list of operand indexes that are to be folded. Abort if
877 // any operand will prevent folding.
879 SmallVector<unsigned, 2> FoldOps;
880 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
883 // The only time it's safe to fold into a two address instruction is when
884 // it's folding reload and spill from / into a spill stack slot.
885 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (isSS) or the remat def.
888 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
889 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
891 // Remember this instruction uses the spill slot.
892 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
894 // Attempt to fold the memory reference into the instruction. If
895 // we can do this, we don't need to insert spill code.
896 MachineBasicBlock &MBB = *MI->getParent();
897 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
898 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old MI to the folded one.
899 vrm.transferSpillPts(MI, fmi);
900 vrm.transferRestorePts(MI, fmi);
901 vrm.transferEmergencySpills(MI, fmi);
// Update the index maps and splice the folded instruction in place of MI.
903 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
904 mi2iMap_[fmi] = InstrIdx;
905 MI = MBB.insert(MBB.erase(MI), fmi);
912 /// canFoldMemoryOperand - Returns true if the specified load / store
913 /// folding is possible.
914 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
915 SmallVector<unsigned, 2> &Ops,
917 // Filter the list of operand indexes that are to be folded. Abort if
918 // any operand will prevent folding.
920 SmallVector<unsigned, 2> FoldOps;
921 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
924 // It's only legal to remat for a use, not a def.
925 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target hook.
928 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Return whether the entire interval is contained within a single basic
// block: each range is mapped to a block via Idx2MBBMap, ranges that cross a
// block boundary fail immediately, and (per the elided tail) presumably the
// collected block set must have size one -- TODO confirm against full source.
931 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
932 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
933 for (LiveInterval::Ranges::const_iterator
934 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
935 std::vector<IdxMBBPair>::const_iterator II =
936 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
937 if (II == Idx2MBBMap.end())
939 if (I->end > II->first) // crossing a MBB.
941 MBBs.insert(II->second);
948 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
949 /// interval on to-be re-materialized operands of MI) with new register.
950 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
951 MachineInstr *MI, unsigned NewVReg,
953 // There is an implicit use. That means one of the other operand is
954 // being remat'ed and the remat'ed instruction has li.reg as an
955 // use operand. Make sure we rewrite that as well.
956 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
957 MachineOperand &MO = MI->getOperand(i);
958 if (!MO.isRegister())
960 unsigned Reg = MO.getReg();
// Only virtual registers that were rematerialized are of interest.
961 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
963 if (!vrm.isReMaterialized(Reg))
// Patch the remat'ed instruction's use of li.reg to the replacement vreg.
965 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
966 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
968 UseMO->setReg(NewVReg);
972 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
973 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites the operands of one instruction MI that reference li.reg so they
// use NewVReg instead (creating NewVReg if the caller passed 0), attempts to
// fold the spill load/store into MI, and records stack-slot / remat-id
// assignments in the VirtRegMap.  Outputs:
//   - NewVReg (in/out): vreg to reuse; may be freshly created here.
//   - HasDef / HasUse (out): whether MI defines / uses the register.
// Returns whether the rewritten operands could (still) be folded, so the
// caller can lower the spill weight of intervals whose accesses all fold.
// NOTE(review): several original lines are elided in this view (e.g. loop
// `continue`s and some closing braces); comments below describe only what
// the visible code shows.
975 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
976 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
977 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
978 unsigned Slot, int LdSlot,
979 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
981 const TargetRegisterClass* rc,
982 SmallVector<int, 4> &ReMatIds,
983 const MachineLoopInfo *loopInfo,
984 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
985 std::map<unsigned,unsigned> &MBBVRegsMap,
986 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
987 MachineBasicBlock *MBB = MI->getParent();
988 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
989 bool CanFold = false;
// Walk every operand of MI looking for references to the spilled register.
991 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
992 MachineOperand& mop = MI->getOperand(i);
993 if (!mop.isRegister())
995 unsigned Reg = mop.getReg();
// Only virtual-register operands are candidates for rewriting.
997 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1002 bool TryFold = !DefIsReMat;
1003 bool FoldSS = true; // Default behavior unless it's a remat.
1004 int FoldSlot = Slot;
1006 // If this is the rematerializable definition MI itself and
1007 // all of its uses are rematerialized, simply delete it.
1008 if (MI == ReMatOrigDefMI && CanDelete) {
1009 DOUT << "\t\t\t\tErasing re-materlizable def: ";
// Remove MI from both the slot-index maps and the VirtRegMap bookkeeping
// before physically erasing it.
1011 RemoveMachineInstrFromMaps(MI);
1012 vrm.RemoveMachineInstrFromMaps(MI);
1013 MI->eraseFromParent();
1017 // If def for this use can't be rematerialized, then try folding.
1018 // If def is rematerializable and it's a load, also try folding.
1019 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1021 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1027 // Scan all of the operands of this instruction rewriting operands
1028 // to use NewVReg instead of li.reg as appropriate. We do this for
1031 // 1. If the instr reads the same spilled vreg multiple times, we
1032 // want to reuse the NewVReg.
1033 // 2. If the instr is a two-addr instruction, we are required to
1034 // keep the src/dst regs pinned.
1036 // Keep track of whether we replace a use and/or def so that we can
1037 // create the spill interval with the appropriate range.
1039 HasUse = mop.isUse();
1040 HasDef = mop.isDef();
// Ops collects the indices of the *other* operands of MI that also
// reference the spilled register, so they all get the same NewVReg.
1041 SmallVector<unsigned, 2> Ops;
1043 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1044 const MachineOperand &MOj = MI->getOperand(j);
1045 if (!MOj.isRegister())
1047 unsigned RegJ = MOj.getReg();
1048 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1052 HasUse |= MOj.isUse();
1053 HasDef |= MOj.isDef();
// A use at a point where li is not live must come from an IMPLICIT_DEF;
// see the correctness example below — do not spill such uses.
1057 if (HasUse && !li.liveAt(getUseIndex(index)))
1058 // Must be defined by an implicit def. It should not be spilled. Note,
1059 // this is for correctness reason. e.g.
1060 // 8 %reg1024<def> = IMPLICIT_DEF
1061 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1062 // The live range [12, 14) are not part of the r1024 live interval since
1063 // it's defined by an implicit def. It will not conflicts with live
1064 // interval of r1025. Now suppose both registers are spilled, you can
1065 // easily see a situation where both registers are reloaded before
1066 // the INSERT_SUBREG and both target registers that would overlap.
1069 // Update stack slot spill weight if we are splitting.
1070 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1077 // Do not fold load / store here if we are splitting. We'll find an
1078 // optimal point to insert a load / store later.
1080 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1081 Ops, FoldSS, FoldSlot, Reg)) {
1082 // Folding the load/store can completely change the instruction in
1083 // unpredictable ways, rescan it from the beginning.
// If folding consumed MI entirely, stop; otherwise restart the operand
// scan (RestartInstruction label is defined earlier in this function,
// outside the visible excerpt).
1087 if (isRemoved(MI)) {
1091 goto RestartInstruction;
1094 // We'll try to fold it later if it's profitable.
1095 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1099 // Create a new virtual register for the spill interval.
1100 bool CreatedNewVReg = false;
1102 NewVReg = mri_->createVirtualRegister(rc);
1104 CreatedNewVReg = true;
// Rewrite this operand (and any implicit operands tied to it) to NewVReg.
1106 mop.setReg(NewVReg);
1107 if (mop.isImplicit())
1108 rewriteImplicitOps(li, MI, NewVReg, vrm);
1110 // Reuse NewVReg for other reads.
1111 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1112 MachineOperand &mopj = MI->getOperand(Ops[j]);
1113 mopj.setReg(NewVReg);
1114 if (mopj.isImplicit())
1115 rewriteImplicitOps(li, MI, NewVReg, vrm);
1118 if (CreatedNewVReg) {
// Record how to rematerialize NewVReg.  MAX_STACK_SLOT doubles as the
// "no remat id assigned yet" sentinel for this val#.
1120 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1121 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1122 // Each valnum may have its own remat id.
1123 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1125 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1127 if (!CanDelete || (HasUse && HasDef)) {
1128 // If this is a two-addr instruction then its use operands are
1129 // rematerializable but its def is not. It should be assigned a
1131 vrm.assignVirt2StackSlot(NewVReg, Slot);
1134 vrm.assignVirt2StackSlot(NewVReg, Slot);
1136 } else if (HasUse && HasDef &&
1137 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1138 // If this interval hasn't been assigned a stack slot (because earlier
1139 // def is a deleted remat def), do it now.
1140 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1141 vrm.assignVirt2StackSlot(NewVReg, Slot);
1144 // Re-matting an instruction with virtual register use. Add the
1145 // register as an implicit use on the use MI.
1146 if (DefIsReMat && ImpUse)
1147 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1149 // create a new register interval for this spill / remat.
1150 LiveInterval &nI = getOrCreateInterval(NewVReg);
1151 if (CreatedNewVReg) {
1152 NewLIs.push_back(&nI);
// Remember which vreg is in use for this MBB so later instructions in the
// same block reuse it when splitting.
1153 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1155 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a range covering this use (load slot through use
// slot), with a fresh anonymous val# when the vreg is new.
1159 if (CreatedNewVReg) {
1160 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1161 nI.getNextValue(~0U, 0, VNInfoAllocator));
1165 // Extend the split live interval to this def / use.
1166 unsigned End = getUseIndex(index)+1;
1167 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1168 nI.getValNumInfo(nI.getNumValNums()-1));
// For a def, the new range covers def slot through store slot.
1174 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1175 nI.getNextValue(~0U, 0, VNInfoAllocator));
1180 DOUT << "\t\t\t\tAdded new interval: ";
1181 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Return whether the value number VNI has a kill
// located inside MBB at a slot index strictly after Idx (i.e. the value is
// still used later in this block).  Used to decide if a def's spill store
// can be treated as a kill point.
1186 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1188 MachineBasicBlock *MBB, unsigned Idx) const {
// Scan VNI's recorded kill indices for one in the (Idx, MBB-end) window.
1189 unsigned End = getMBBEndIdx(MBB);
1190 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1191 unsigned KillIdx = VNI->kills[j];
1192 if (KillIdx > Idx && KillIdx < End)
1198 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1199 /// during spilling.
// One record per (instruction, operand-kind) occurrence of the spilled
// register: the instruction's slot index, the instruction itself, and
// whether that occurrence is a use and/or a def.
1201 struct RewriteInfo {
1206 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1207 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index, used with std::sort so the
// rewrite loop visits instructions in program order.
1210 struct RewriteInfoCompare {
1211 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1212 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs and uses of li.reg inside
// the live range *I.  Collects every referencing instruction in the range,
// sorts them by slot index, rewrites each through
// rewriteInstructionForSpills, and records per-MBB spill points (last def)
// in SpillIdxes/SpillMBBs and restore points (first use) in
// RestoreIdxes/RestoreMBBs for the split-insertion phase.
// NOTE(review): some original lines are elided in this view; comments
// describe only the visible code.
1217 void LiveIntervals::
1218 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1219 LiveInterval::Ranges::const_iterator &I,
1220 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1221 unsigned Slot, int LdSlot,
1222 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1224 const TargetRegisterClass* rc,
1225 SmallVector<int, 4> &ReMatIds,
1226 const MachineLoopInfo *loopInfo,
1227 BitVector &SpillMBBs,
1228 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1229 BitVector &RestoreMBBs,
1230 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1231 std::map<unsigned,unsigned> &MBBVRegsMap,
1232 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1233 bool AllCanFold = true;
1234 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction slot boundaries.
1235 unsigned start = getBaseIndex(I->start);
1236 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1238 // First collect all the def / use in this live range that will be rewritten.
1239 // Make sure they are sorted according to instruction index.
1240 std::vector<RewriteInfo> RewriteMIs;
1241 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1242 re = mri_->reg_end(); ri != re; ) {
1243 MachineInstr *MI = &*ri;
1244 MachineOperand &O = ri.getOperand();
1246 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1247 unsigned index = getInstructionIndex(MI);
// Skip references outside this live range.
1248 if (index < start || index >= end)
1250 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1251 // Must be defined by an implicit def. It should not be spilled. Note,
1252 // this is for correctness reason. e.g.
1253 // 8 %reg1024<def> = IMPLICIT_DEF
1254 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1255 // The live range [12, 14) are not part of the r1024 live interval since
1256 // it's defined by an implicit def. It will not conflicts with live
1257 // interval of r1025. Now suppose both registers are spilled, you can
1258 // easily see a situation where both registers are reloaded before
1259 // the INSERT_SUBREG and both target registers that would overlap.
1261 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1263 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat def itself reads a virtual register, that register must stay
// live; getReMatImplicitUse finds it (0 if none).
1265 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1266 // Now rewrite the defs and uses.
1267 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1268 RewriteInfo &rwi = RewriteMIs[i];
1270 unsigned index = rwi.Index;
1271 bool MIHasUse = rwi.HasUse;
1272 bool MIHasDef = rwi.HasDef;
1273 MachineInstr *MI = rwi.MI;
1274 // If MI def and/or use the same register multiple times, then there
1275 // are multiple entries.
// Collapse consecutive entries for the same MI into one rewrite,
// accumulating use/def flags and the use count.
1276 unsigned NumUses = MIHasUse;
1277 while (i != e && RewriteMIs[i].MI == MI) {
1278 assert(RewriteMIs[i].Index == index);
1279 bool isUse = RewriteMIs[i].HasUse;
1280 if (isUse) ++NumUses;
1282 MIHasDef |= RewriteMIs[i].HasDef;
1285 MachineBasicBlock *MBB = MI->getParent();
1287 if (ImpUse && MI != ReMatDefMI) {
1288 // Re-matting an instruction with virtual register use. Update the
1289 // register interval's spill weight to HUGE_VALF to prevent it from
// being spilled itself: the remat def depends on it staying in a register.
1291 LiveInterval &ImpLi = getInterval(ImpUse);
1292 ImpLi.weight = HUGE_VALF;
1295 unsigned MBBId = MBB->getNumber();
1296 unsigned ThisVReg = 0;
// When splitting, reuse the vreg already chosen for this MBB, if any.
1298 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1299 if (NVI != MBBVRegsMap.end()) {
1300 ThisVReg = NVI->second;
1307 // It's better to start a new interval to avoid artifically
1308 // extend the new interval.
1309 if (MIHasDef && !MIHasUse) {
1310 MBBVRegsMap.erase(MBB->getNumber());
1316 bool IsNew = ThisVReg == 0;
1318 // This ends the previous live interval. If all of its def / use
1319 // can be folded, give it a low spill weight.
1320 if (NewVReg && TrySplit && AllCanFold) {
1321 LiveInterval &nI = getOrCreateInterval(NewVReg);
1328 bool HasDef = false;
1329 bool HasUse = false;
1330 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1331 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1332 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1333 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1334 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
// Nothing was rewritten (e.g. the remat def itself was deleted).
1335 if (!HasDef && !HasUse)
1338 AllCanFold &= CanFold;
1340 // Update weight of spill interval.
1341 LiveInterval &nI = getOrCreateInterval(NewVReg);
1343 // The spill weight is now infinity as it cannot be spilled again.
1344 nI.weight = HUGE_VALF;
1348 // Keep track of the last def and first use in each MBB.
1350 if (MI != ReMatOrigDefMI || !CanDelete) {
1351 bool HasKill = false;
1353 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1355 // If this is a two-address code, then this index starts a new VNInfo.
1356 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1358 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1360 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1361 SpillIdxes.find(MBBId);
// Record this def as a (potential) spill point for the MBB; keep only the
// latest def per (MBB, vreg) pair.
1363 if (SII == SpillIdxes.end()) {
1364 std::vector<SRInfo> S;
1365 S.push_back(SRInfo(index, NewVReg, true));
1366 SpillIdxes.insert(std::make_pair(MBBId, S));
1367 } else if (SII->second.back().vreg != NewVReg) {
1368 SII->second.push_back(SRInfo(index, NewVReg, true));
1369 } else if ((int)index > SII->second.back().index) {
1370 // If there is an earlier def and this is a two-address
1371 // instruction, then it's not possible to fold the store (which
1372 // would also fold the load).
1373 SRInfo &Info = SII->second.back();
1375 Info.canFold = !HasUse;
1377 SpillMBBs.set(MBBId);
1378 } else if (SII != SpillIdxes.end() &&
1379 SII->second.back().vreg == NewVReg &&
1380 (int)index > SII->second.back().index) {
1381 // There is an earlier def that's not killed (must be two-address).
1382 // The spill is no longer needed.
1383 SII->second.pop_back();
1384 if (SII->second.empty()) {
1385 SpillIdxes.erase(MBBId);
1386 SpillMBBs.reset(MBBId);
// Use handling: a use after the last recorded def makes that spill unsafe
// to fold, and a second use in the MBB makes the restore unsafe to fold.
1393 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1394 SpillIdxes.find(MBBId);
1395 if (SII != SpillIdxes.end() &&
1396 SII->second.back().vreg == NewVReg &&
1397 (int)index > SII->second.back().index)
1398 // Use(s) following the last def, it's not safe to fold the spill.
1399 SII->second.back().canFold = false;
1400 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1401 RestoreIdxes.find(MBBId);
1402 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1403 // If we are splitting live intervals, only fold if it's the first
1404 // use and there isn't another use later in the MBB.
1405 RII->second.back().canFold = false;
1407 // Only need a reload if there isn't an earlier def / use.
1408 if (RII == RestoreIdxes.end()) {
1409 std::vector<SRInfo> Infos;
1410 Infos.push_back(SRInfo(index, NewVReg, true));
1411 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1413 RII->second.push_back(SRInfo(index, NewVReg, true));
1415 RestoreMBBs.set(MBBId);
1419 // Update spill weight.
1420 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1421 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Handle the final interval the same way the per-iteration check above
// handled intervals ended mid-loop.
1424 if (NewVReg && TrySplit && AllCanFold) {
1425 // If all of its def / use can be folded, give it a low spill weight.
1426 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return whether MBB Id contains a foldable restore of
// vreg vr at slot index 'index' (i.e. an entry in RestoreIdxes with
// canFold set).  Used when folding a spill store to check whether the
// matching reload can be folded away as well.
1431 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1432 BitVector &RestoreMBBs,
1433 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this MBB at all.
1434 if (!RestoreMBBs[Id])
1436 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1437 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1438 if (Restores[i].index == index &&
1439 Restores[i].vreg == vr &&
1440 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded for MBB Id at slot
// 'index' by setting its index to -1, so no reload is issued (the use was
// folded together with the spill).
1445 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1446 BitVector &RestoreMBBs,
1447 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1448 if (!RestoreMBBs[Id])
1450 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1451 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): parameter 'vr' is never compared here; the condition only
// tests that Restores[i].vreg is non-zero.  Looks like an intended
// 'Restores[i].vreg == vr' — confirm against callers before changing.
1452 if (Restores[i].index == index && Restores[i].vreg)
1453 Restores[i].index = -1;
1456 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1457 /// spilled and create empty intervals for their uses.
// Walks the remaining references to li.reg after rewriting: any leftover
// def must be an IMPLICIT_DEF (everything else was rewritten) and is
// erased; each leftover use gets a fresh vreg with an empty live interval
// so the allocator can still assign it a register.
1459 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1460 const TargetRegisterClass* rc,
1461 std::vector<LiveInterval*> &NewLIs) {
1462 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1463 re = mri_->reg_end(); ri != re; ) {
1464 MachineOperand &O = ri.getOperand();
1465 MachineInstr *MI = &*ri;
// Defs: only IMPLICIT_DEF may remain; delete it from maps and the block.
1468 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1469 "Register def was not rewritten?");
1470 RemoveMachineInstrFromMaps(MI);
1471 vrm.RemoveMachineInstrFromMaps(MI);
1472 MI->eraseFromParent();
1474 // This must be an use of an implicit_def so it's not part of the live
1475 // interval. Create a new empty live interval for it.
1476 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1477 unsigned NewVReg = mri_->createVirtualRegister(rc);
1479 vrm.setIsImplicitlyDefined(NewVReg);
1480 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of MI that referenced li.reg to the new vreg.
1481 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1482 MachineOperand &MO = MI->getOperand(i);
1483 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - Top-level spill driver.  Given an interval li
// chosen for spilling, produce the set of new (short) live intervals that
// replace it: decide per-val# rematerializability, assign a stack slot,
// rewrite all defs/uses, insert spill/restore points when splitting, and
// finalize kill flags and spill weights on the resulting intervals.
// NOTE(review): many original lines are elided in this view; comments
// describe only the visible code.
1491 std::vector<LiveInterval*> LiveIntervals::
1492 addIntervalsForSpills(const LiveInterval &li,
1493 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1495 assert(li.weight != HUGE_VALF &&
1496 "attempt to spill already spilled interval!");
1498 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1499 li.print(DOUT, tri_);
1502 // Spill slot weight.
1505 // Each bit specify whether it a spill is required in the MBB.
1506 BitVector SpillMBBs(mf_->getNumBlockIDs());
1507 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1508 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1509 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1510 std::map<unsigned,unsigned> MBBVRegsMap;
1511 std::vector<LiveInterval*> NewLIs;
1512 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# remat bookkeeping, indexed by VNInfo::id.
1514 unsigned NumValNums = li.getNumValNums();
1515 SmallVector<MachineInstr*, 4> ReMatDefs;
1516 ReMatDefs.resize(NumValNums, NULL);
1517 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1518 ReMatOrigDefs.resize(NumValNums, NULL);
1519 SmallVector<int, 4> ReMatIds;
1520 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1521 BitVector ReMatDelete(NumValNums);
1522 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1524 // Spilling a split live interval. It cannot be split any further. Also,
1525 // it's also guaranteed to be a single val# / range interval.
1526 if (vrm.getPreSplitReg(li.reg)) {
1527 vrm.setIsSplitFromReg(li.reg, 0);
1528 // Unset the split kill marker on the last use.
1529 unsigned KillIdx = vrm.getKillPoint(li.reg);
1531 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1532 assert(KillMI && "Last use disappeared?");
1533 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1534 assert(KillOp != -1 && "Last use disappeared?");
1535 KillMI->getOperand(KillOp).setIsKill(false);
1537 vrm.removeKillPoint(li.reg);
1538 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1539 Slot = vrm.getStackSlot(li.reg);
1540 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1541 MachineInstr *ReMatDefMI = DefIsReMat ?
1542 vrm.getReMaterializedMI(li.reg) : NULL;
1544 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1545 bool isLoad = isLoadSS ||
1546 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1547 bool IsFirstRange = true;
1548 for (LiveInterval::Ranges::const_iterator
1549 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1550 // If this is a split live interval with multiple ranges, it means there
1551 // are two-address instructions that re-defined the value. Only the
1552 // first def can be rematerialized!
1554 // Note ReMatOrigDefMI has already been deleted.
1555 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1556 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1557 false, vrm, rc, ReMatIds, loopInfo,
1558 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1559 MBBVRegsMap, NewLIs, SSWeight);
// Later ranges: plain spill, no remat.
1561 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1562 Slot, 0, false, false, false,
1563 false, vrm, rc, ReMatIds, loopInfo,
1564 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1565 MBBVRegsMap, NewLIs, SSWeight);
1567 IsFirstRange = false;
1570 SSWeight = 0.0f; // Already accounted for when split.
1571 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Normal (non-pre-split) path: optionally split at BB boundaries, subject
// to the -split-limit debugging cap.
1575 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1576 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
// First pass over val#s: decide which defs are rematerializable, clone
// them (the original may be modified/deleted), and work out whether a
// stack slot is needed at all.
1580 bool NeedStackSlot = false;
1581 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1583 const VNInfo *VNI = *i;
1584 unsigned VN = VNI->id;
1585 unsigned DefIdx = VNI->def;
1587 continue; // Dead val#.
1588 // Is the def for the val# rematerializable?
1589 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1590 ? 0 : getInstructionFromIndex(DefIdx);
1592 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1593 // Remember how to remat the def of this val#.
1594 ReMatOrigDefs[VN] = ReMatDefMI;
1595 // Original def may be modified so we have to make a copy here.
1596 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1597 ClonedMIs.push_back(Clone);
1598 ReMatDefs[VN] = Clone;
1600 bool CanDelete = true;
1601 if (VNI->hasPHIKill) {
1602 // A kill is a phi node, not all of its uses can be rematerialized.
1603 // It must not be deleted.
1605 // Need a stack slot if there is any live range where uses cannot be
1607 NeedStackSlot = true;
1610 ReMatDelete.set(VN);
1612 // Need a stack slot if there is any live range where uses cannot be
1614 NeedStackSlot = true;
1618 // One stack slot per live interval.
1619 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1620 Slot = vrm.assignVirt2StackSlot(li.reg);
1622 // Create new intervals and rewrite defs and uses.
1623 for (LiveInterval::Ranges::const_iterator
1624 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1625 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1626 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1627 bool DefIsReMat = ReMatDefMI != NULL;
1628 bool CanDelete = ReMatDelete[I->valno->id];
1630 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1631 bool isLoad = isLoadSS ||
1632 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1633 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1634 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1635 CanDelete, vrm, rc, ReMatIds, loopInfo,
1636 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1637 MBBVRegsMap, NewLIs, SSWeight);
1640 // Insert spills / restores if we are splitting.
1642 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1646 SmallPtrSet<LiveInterval*, 4> AddedKill;
1647 SmallVector<unsigned, 2> Ops;
// Spill insertion: for each MBB flagged in SpillMBBs, either fold the
// store into the defining instruction or tell the spiller to insert one.
1648 if (NeedStackSlot) {
1649 int Id = SpillMBBs.find_first();
1651 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1652 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1653 std::vector<SRInfo> &spills = SpillIdxes[Id];
1654 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1655 int index = spills[i].index;
1656 unsigned VReg = spills[i].vreg;
1657 LiveInterval &nI = getOrCreateInterval(VReg);
1658 bool isReMat = vrm.isReMaterialized(VReg);
1659 MachineInstr *MI = getInstructionFromIndex(index);
1660 bool CanFold = false;
1661 bool FoundUse = false;
1663 if (spills[i].canFold) {
1665 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1666 MachineOperand &MO = MI->getOperand(j);
1667 if (!MO.isRegister() || MO.getReg() != VReg)
1674 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1675 RestoreMBBs, RestoreIdxes))) {
1676 // MI has two-address uses of the same register. If the use
1677 // isn't the first and only use in the BB, then we can't fold
1678 // it. FIXME: Move this to rewriteInstructionsForSpills.
1685 // Fold the store into the def if possible.
1686 bool Folded = false;
1687 if (CanFold && !Ops.empty()) {
1688 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1691 // Also folded uses, do not issue a load.
1692 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1693 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1695 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1699 // Otherwise tell the spiller to issue a spill.
1701 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1702 bool isKill = LR->end == getStoreIndex(index);
1703 if (!MI->registerDefIsDead(nI.reg))
1704 // No need to spill a dead def.
1705 vrm.addSpillPoint(VReg, isKill, MI);
1707 AddedKill.insert(&nI);
1710 // Update spill slot weight.
1712 SSWeight += getSpillWeight(true, false, loopDepth);
1714 Id = SpillMBBs.find_next(Id);
// Restore insertion: for each MBB flagged in RestoreMBBs, fold the reload
// (or a rematerializable load) into the use, or request a restore point.
1718 int Id = RestoreMBBs.find_first();
1720 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1721 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1723 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1724 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1725 int index = restores[i].index;
// index == -1 marks a restore erased by eraseRestoreInfo (visible in the
// elided portion's callers) — presumably skipped here.
1728 unsigned VReg = restores[i].vreg;
1729 LiveInterval &nI = getOrCreateInterval(VReg);
1730 bool isReMat = vrm.isReMaterialized(VReg);
1731 MachineInstr *MI = getInstructionFromIndex(index);
1732 bool CanFold = false;
1734 if (restores[i].canFold) {
1736 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1737 MachineOperand &MO = MI->getOperand(j);
1738 if (!MO.isRegister() || MO.getReg() != VReg)
1742 // If this restore were to be folded, it would have been folded
1751 // Fold the load into the use if possible.
1752 bool Folded = false;
1753 if (CanFold && !Ops.empty()) {
1755 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1757 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1759 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1760 // If the rematerializable def is a load, also try to fold it.
1761 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1762 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1763 Ops, isLoadSS, LdSlot, VReg);
1764 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1766 // Re-matting an instruction with virtual register use. Add the
1767 // register as an implicit use on the use MI and update the register
1768 // interval's spill weight to HUGE_VALF to prevent it from being
1770 LiveInterval &ImpLi = getInterval(ImpUse);
1771 ImpLi.weight = HUGE_VALF;
1772 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1776 // If folding is not possible / failed, then tell the spiller to issue a
1777 // load / rematerialization for us.
1779 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1781 vrm.addRestorePoint(VReg, MI);
1783 // Update spill slot weight.
1785 SSWeight += getSpillWeight(false, true, loopDepth);
1787 Id = RestoreMBBs.find_next(Id);
1790 // Finalize intervals: add kills, finalize spill weights, and filter out
1792 std::vector<LiveInterval*> RetNewLIs;
1793 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1794 LiveInterval *LI = NewLIs[i];
// Normalize the weight by interval size so short intervals aren't unduly
// preferred, then mark the last use as a kill unless a spill already did.
1796 LI->weight /= getApproximateInstructionCount(*LI);
1797 if (!AddedKill.count(LI)) {
1798 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1799 unsigned LastUseIdx = getBaseIndex(LR->end);
1800 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1801 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1802 assert(UseIdx != -1);
1803 if (LastUse->getOperand(UseIdx).isImplicit() ||
1804 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1805 LastUse->getOperand(UseIdx).setIsKill();
1806 vrm.addKillPoint(LI->reg, LastUseIdx);
1809 RetNewLIs.push_back(LI);
1813 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1817 /// hasAllocatableSuperReg - Return true if the specified physical register has
1818 /// any super register that's allocatable.
// A super-register counts only if it is both marked allocatable and has a
// live interval computed for it.
1819 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1820 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1821 if (allocatableRegs_[*AS] && hasInterval(*AS))
1826 /// getRepresentativeReg - Find the largest super register of the specified
1827 /// physical register.
1828 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1829 // Find the largest super-register that is allocatable.
1830 unsigned BestReg = Reg;
1831 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1832 unsigned SuperReg = *AS;
// Pick a super-register that itself has no allocatable super-register
// (i.e. is maximal) but does have an interval.
1833 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1841 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1842 /// specified interval that conflicts with the specified physical register.
// Counts, over all instructions referencing li.reg, how many occur at a
// slot index where the representative physreg's interval is live.
1843 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1844 unsigned PhysReg) const {
1845 unsigned NumConflicts = 0;
1846 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1847 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1848 E = mri_->reg_end(); I != E; ++I) {
1849 MachineOperand &O = I.getOperand();
1850 MachineInstr *MI = O.getParent();
1851 unsigned Index = getInstructionIndex(MI);
1852 if (pli.liveAt(Index))
1855 return NumConflicts;
1858 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1859 /// around all defs and uses of the specified interval.
// For every instruction that references li.reg and at which the physreg's
// representative interval is live, records an emergency spill point in the
// VirtRegMap and carves the instruction's index range out of the physreg's
// (and its sub-registers') live intervals.
1860 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1861 unsigned PhysReg, VirtRegMap &vrm) {
1862 unsigned SpillReg = getRepresentativeReg(PhysReg);
1864 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1865 // If there are registers which alias PhysReg, but which are not a
1866 // sub-register of the chosen representative super register. Assert
1867 // since we can't handle it yet.
1868 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1869 tri_->isSuperRegister(*AS, SpillReg));
1871 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs prevents handling the same instruction twice when it references
// li.reg through multiple operands.
1872 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1873 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1874 E = mri_->reg_end(); I != E; ++I) {
1875 MachineOperand &O = I.getOperand();
1876 MachineInstr *MI = O.getParent();
1877 if (SeenMIs.count(MI))
1880 unsigned Index = getInstructionIndex(MI);
1881 if (pli.liveAt(Index)) {
1882 vrm.addEmergencySpill(SpillReg, MI);
// Clear the full load-through-store slot window around this instruction.
1883 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1884 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1885 if (!hasInterval(*AS))
1887 LiveInterval &spli = getInterval(*AS);
1888 if (spli.liveAt(Index))
1889 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1895 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1896 MachineInstr* startInst) {
1897 LiveInterval& Interval = getOrCreateInterval(reg);
1898 VNInfo* VN = Interval.getNextValue(
1899 getInstructionIndex(startInst) + InstrSlots::DEF,
1900 startInst, getVNInfoAllocator());
1901 VN->hasPHIKill = true;
1902 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1903 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1904 getMBBEndIdx(startInst->getParent()) + 1, VN);
1905 Interval.addRange(LR);