1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
#include <cmath>
39 // Hidden options for help debugging.
// -disable-rematerialization: turn off remat entirely (debugging aid).
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: allow interval splitting at basic-block
// boundaries (on by default).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
// -split-limit: cap on the number of splits; -1 means unlimited.
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// Pass statistics, reported with -stats.
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration under the name "liveintervals".
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
64 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state so the pass can run again.
// NOTE(review): interior lines are elided in this copy (the clearing of the
// index maps before the allocator reset, and the loop body that presumably
// deletes each entry of ClonedMIs) -- confirm against upstream before editing.
67 void LiveIntervals::releaseMemory() {
73 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
74 VNInfoAllocator.Reset();
75 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
// computeNumbering - Assign a slot index (InstrSlots::NUM apart) to every
// instruction and record per-MBB index ranges. If a previous numbering
// exists (OldI2MI non-empty), renumber all live ranges, value-number defs,
// and kill slots to the new indices.
// NOTE(review): many interior lines (loop headers' second lines, else
// branches, closing braces) are elided in this copy -- the remap logic below
// is not complete as shown; confirm against upstream before editing.
79 void LiveIntervals::computeNumbering() {
80 Index2MiMap OldI2MI = i2miMap_;
87 // Number MachineInstrs and MachineBasicBlocks.
88 // Initialize MBB indexes to a sentinel.
89 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
92 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
94 unsigned StartIdx = MIIndex;
96 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
98 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
99 assert(inserted && "multiple MachineInstr -> index mappings");
100 i2miMap_.push_back(I);
101 MIIndex += InstrSlots::NUM;
// An empty MBB still consumes one slot so its range is non-degenerate; the
// i2miMap_ entry for that slot is a null sentinel.
104 if (StartIdx == MIIndex) {
106 MIIndex += InstrSlots::NUM;
107 i2miMap_.push_back(0);
109 // Set the MBB2IdxMap entry for this MBB.
110 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
111 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
113 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Renumber existing intervals to the fresh numbering.
115 if (!OldI2MI.empty())
116 for (iterator I = begin(), E = end(); I != E; ++I)
117 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
120 // Remap the start index of the live range to the corresponding new
121 // number, or our best guess at what it _should_ correspond to if the
122 // original instruction has been erased. This is either the following
123 // instruction or its predecessor.
124 unsigned offset = LI->start % InstrSlots::NUM;
125 if (OldI2MI[LI->start / InstrSlots::NUM])
126 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
129 MachineInstr* newInstr = 0;
131 newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
135 if (mi2iMap_[newInstr] ==
136 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
137 LI->start = mi2iMap_[newInstr];
139 LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
142 // Remap the ending index in the same way that we remapped the start,
143 // except for the final step where we always map to the immediately
144 // following instruction.
145 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
146 offset = LI->end % InstrSlots::NUM;
147 if (OldI2MI[LI->end / InstrSlots::NUM])
148 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
151 MachineInstr* newInstr = 0;
153 newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
157 LI->end = mi2iMap_[newInstr];
// Range extended past the old numbering: clamp to one past the last slot.
160 LI->end = i2miMap_.size() * InstrSlots::NUM;
163 // Remap the VNInfo def index, which works the same as the
164 // start indices above.
165 VNInfo* vni = LI->valno;
166 offset = vni->def % InstrSlots::NUM;
167 if (OldI2MI[vni->def / InstrSlots::NUM])
168 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
171 MachineInstr* newInstr = 0;
173 newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
177 if (mi2iMap_[newInstr] ==
178 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
179 vni->def = mi2iMap_[newInstr];
181 vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
184 // Remap the VNInfo kill indices, which works the same as
185 // the end indices above.
186 for (size_t i = 0; i < vni->kills.size(); ++i) {
187 offset = vni->kills[i] % InstrSlots::NUM;
188 if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
189 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
193 MachineInstr* newInstr = 0;
195 newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
199 vni->kills[i] = mi2iMap_[newInstr];
205 /// runOnMachineFunction - Register allocate the whole function
// Caches target hooks, then (in elided lines) numbers the instructions and
// computes all intervals, dumping them under -debug.
// NOTE(review): this copy is missing the `mf_ = &fn;` initialization, the
// calls to computeNumbering()/computeIntervals(), the DEBUG wrapper around
// the dump loop, and the trailing `return true;` -- confirm against upstream.
207 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
209 mri_ = &mf_->getRegInfo();
210 tm_ = &fn.getTarget();
211 tri_ = tm_->getRegisterInfo();
212 tii_ = tm_->getInstrInfo();
213 lv_ = &getAnalysis<LiveVariables>();
214 allocatableRegs_ = tri_->getAllocatableSet(fn);
219 numIntervals += getNumIntervals();
221 DOUT << "********** INTERVALS **********\n";
222 for (iterator I = begin(), E = end(); I != E; ++I) {
223 I->second.print(DOUT, tri_);
227 numIntervalsAfter += getNumIntervals();
232 /// print - Implement the dump method.
// Prints every interval followed by every machine instruction with its
// slot index, to the given stream.
233 void LiveIntervals::print(std::ostream &O, const Module* ) const {
234 O << "********** INTERVALS **********\n";
235 for (const_iterator I = begin(), E = end(); I != E; ++I) {
236 I->second.print(O, tri_);
// NOTE(review): closing braces / newline emission between the two sections
// are elided in this copy.
240 O << "********** MACHINEINSTRS **********\n";
241 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
242 mbbi != mbbe; ++mbbi) {
243 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
244 for (MachineBasicBlock::iterator mii = mbbi->begin(),
245 mie = mbbi->end(); mii != mie; ++mii) {
246 O << getInstructionIndex(mii) << '\t' << *mii;
251 /// conflictsWithPhysRegDef - Returns true if the specified register
252 /// is defined during the duration of the specified interval.
// Walks every instruction inside li's ranges and checks whether any operand
// (after mapping virtual registers through the VirtRegMap) overlaps `reg`.
// NOTE(review): the `continue` statements after the move-instruction and
// operand filters, and the `return true` / final `return false`, are elided
// in this copy -- confirm against upstream before editing.
253 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
254 VirtRegMap &vrm, unsigned reg) {
255 for (LiveInterval::Ranges::const_iterator
256 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
257 for (unsigned index = getBaseIndex(I->start),
258 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
259 index += InstrSlots::NUM) {
260 // skip deleted instructions
261 while (index != end && !getInstructionFromIndex(index))
262 index += InstrSlots::NUM;
263 if (index == end) break;
265 MachineInstr *MI = getInstructionFromIndex(index);
// Copies of the interval's own register are not conflicts.
266 unsigned SrcReg, DstReg;
267 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
268 if (SrcReg == li.reg || DstReg == li.reg)
270 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
271 MachineOperand& mop = MI->getOperand(i);
272 if (!mop.isRegister())
274 unsigned PhysReg = mop.getReg();
275 if (PhysReg == 0 || PhysReg == li.reg)
// Map assigned virtual registers to their physical register.
277 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
278 if (!vrm.hasPhys(PhysReg))
280 PhysReg = vrm.getPhys(PhysReg);
282 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
291 void LiveIntervals::printRegName(unsigned reg) const {
292 if (TargetRegisterInfo::isPhysicalRegister(reg))
293 cerr << tri_->getName(reg);
295 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a def of a virtual
// register at MIIdx. Handles three cases: the first def of the vreg, a
// two-address redefinition, and a PHI-elimination redefinition.
// NOTE(review): numerous interior lines (the MIIdx parameter line, early
// returns, `else` keywords, `CopyMI = mi;` assignments, VNInfo* ValNo
// declarations, and closing braces) are elided in this copy -- confirm
// against upstream before editing.
298 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
299 MachineBasicBlock::iterator mi,
301 LiveInterval &interval) {
302 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
303 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
// Implicit defs get no interval contribution (early-out path; the return
// itself is elided here).
305 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
306 DOUT << "is a implicit_def\n";
310 // Virtual registers may be defined multiple times (due to phi
311 // elimination and 2-addr elimination). Much of what we do only has to be
312 // done once for the vreg. We use an empty interval to detect the first
313 // time we see a vreg.
314 if (interval.empty()) {
315 // Get the Idx of the defining instructions.
316 unsigned defIndex = getDefIndex(MIIdx);
// Record the copy instruction, if any, that defines this value number so
// coalescing can identify copy-defined values later.
318 MachineInstr *CopyMI = NULL;
319 unsigned SrcReg, DstReg;
320 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
321 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
322 tii_->isMoveInstr(*mi, SrcReg, DstReg))
324 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
326 assert(ValNo->id == 0 && "First value in interval is not 0?");
328 // Loop over all of the blocks that the vreg is defined in. There are
329 // two cases we have to handle here. The most common case is a vreg
330 // whose lifetime is contained within a basic block. In this case there
331 // will be a single kill, in MBB, which comes after the definition.
332 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
333 // FIXME: what about dead vars?
335 if (vi.Kills[0] != mi)
336 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
338 killIdx = defIndex+1;
340 // If the kill happens after the definition, we have an intra-block
342 if (killIdx > defIndex) {
343 assert(vi.AliveBlocks.none() &&
344 "Shouldn't be alive across any blocks!");
345 LiveRange LR(defIndex, killIdx, ValNo);
346 interval.addRange(LR);
347 DOUT << " +" << LR << "\n";
348 interval.addKill(ValNo, killIdx);
353 // The other case we handle is when a virtual register lives to the end
354 // of the defining block, potentially live across some blocks, then is
355 // live into some number of blocks, but gets killed. Start by adding a
356 // range that goes from this definition to the end of the defining block.
357 LiveRange NewLR(defIndex,
358 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
360 DOUT << " +" << NewLR;
361 interval.addRange(NewLR);
363 // Iterate over all of the blocks that the variable is completely
364 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
366 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
367 if (vi.AliveBlocks[i]) {
368 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
370 LiveRange LR(getMBBStartIdx(i),
371 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
373 interval.addRange(LR);
379 // Finally, this virtual register is live from the start of any killing
380 // block to the 'use' slot of the killing instruction.
381 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
382 MachineInstr *Kill = vi.Kills[i];
383 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
384 LiveRange LR(getMBBStartIdx(Kill->getParent()),
386 interval.addRange(LR);
387 interval.addKill(ValNo, killIdx);
392 // If this is the second time we see a virtual register definition, it
393 // must be due to phi elimination or two addr elimination. If this is
394 // the result of two address elimination, then the vreg is one of the
395 // def-and-use register operand.
396 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
397 // If this is a two-address definition, then we have already processed
398 // the live range. The only problem is that we didn't realize there
399 // are actually two values in the live interval. Because of this we
400 // need to take the LiveRegion that defines this register and split it
402 assert(interval.containsOneValue());
403 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
404 unsigned RedefIndex = getDefIndex(MIIdx);
406 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
407 VNInfo *OldValNo = OldLR->valno;
409 // Delete the initial value, which should be short and continuous,
410 // because the 2-addr copy must be in the same MBB as the redef.
411 interval.removeRange(DefIndex, RedefIndex);
413 // Two-address vregs should always only be redefined once. This means
414 // that at this point, there should be exactly one value number in it.
415 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
417 // The new value number (#1) is defined by the instruction we claimed
419 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
422 // Value#0 is now defined by the 2-addr instruction.
423 OldValNo->def = RedefIndex;
426 // Add the new live interval which replaces the range for the input copy.
427 LiveRange LR(DefIndex, RedefIndex, ValNo);
428 DOUT << " replace range with " << LR;
429 interval.addRange(LR);
430 interval.addKill(ValNo, RedefIndex);
432 // If this redefinition is dead, we need to add a dummy unit live
433 // range covering the def slot.
434 if (mi->registerDefIsDead(interval.reg, tri_))
435 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
438 interval.print(DOUT, tri_);
441 // Otherwise, this must be because of phi elimination. If this is the
442 // first redefinition of the vreg that we have seen, go back and change
443 // the live range in the PHI block to be a different value number.
444 if (interval.containsOneValue()) {
445 assert(vi.Kills.size() == 1 &&
446 "PHI elimination vreg should have one kill, the PHI itself!");
448 // Remove the old range that we now know has an incorrect number.
449 VNInfo *VNI = interval.getValNumInfo(0);
450 MachineInstr *Killer = vi.Kills[0];
451 unsigned Start = getMBBStartIdx(Killer->getParent());
452 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
453 DOUT << " Removing [" << Start << "," << End << "] from: ";
454 interval.print(DOUT, tri_); DOUT << "\n";
455 interval.removeRange(Start, End);
456 VNI->hasPHIKill = true;
457 DOUT << " RESULT: "; interval.print(DOUT, tri_);
459 // Replace the interval with one of a NEW value number. Note that this
460 // value number isn't actually defined by an instruction, weird huh? :)
461 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
462 DOUT << " replace range with " << LR;
463 interval.addRange(LR);
464 interval.addKill(LR.valno, End);
465 DOUT << " RESULT: "; interval.print(DOUT, tri_);
468 // In the case of PHI elimination, each variable definition is only
469 // live until the end of the block. We've already taken care of the
470 // rest of the live range.
471 unsigned defIndex = getDefIndex(MIIdx);
474 MachineInstr *CopyMI = NULL;
475 unsigned SrcReg, DstReg;
476 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
477 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
478 tii_->isMoveInstr(*mi, SrcReg, DstReg))
480 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
482 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
483 LiveRange LR(defIndex, killIndex, ValNo);
484 interval.addRange(LR);
485 interval.addKill(ValNo, killIndex);
486 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Compute the (single-block) live range created
// by a def of a physical register at MIIdx and add it to `interval`.
// NOTE(review): interior lines (the MIIdx parameter line, `goto exit;` jump
// statements, the `exit:` label, and some closing braces) are elided in this
// copy -- confirm against upstream before editing.
494 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
495 MachineBasicBlock::iterator mi,
497 LiveInterval &interval,
498 MachineInstr *CopyMI) {
499 // A physical register cannot be live across basic block, so its
500 // lifetime must end somewhere in its defining basic block.
501 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
503 unsigned baseIndex = MIIdx;
504 unsigned start = getDefIndex(baseIndex);
505 unsigned end = start;
507 // If it is not used after definition, it is considered dead at
508 // the instruction defining it. Hence its interval is:
509 // [defSlot(def), defSlot(def)+1)
510 if (mi->registerDefIsDead(interval.reg, tri_)) {
512 end = getDefIndex(start) + 1;
516 // If it is not dead on definition, it must be killed by a
517 // subsequent instruction. Hence its interval is:
518 // [defSlot(def), useSlot(kill)+1)
519 while (++mi != MBB->end()) {
520 baseIndex += InstrSlots::NUM;
521 if (mi->killsRegister(interval.reg, tri_)) {
523 end = getUseIndex(baseIndex) + 1;
525 } else if (mi->modifiesRegister(interval.reg, tri_)) {
526 // Another instruction redefines the register before it is ever read.
527 // Then the register is essentially dead at the instruction that defines
528 // it. Hence its interval is:
529 // [defSlot(def), defSlot(def)+1)
531 end = getDefIndex(start) + 1;
536 // The only case we should have a dead physreg here without a killing or
537 // instruction where we know it's dead is if it is live-in to the function
539 assert(!CopyMI && "physreg was not killed in defining block!");
540 end = getDefIndex(start) + 1; // It's dead.
543 assert(start < end && "did not find end of interval?");
545 // Already exists? Extend old live interval.
546 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
547 VNInfo *ValNo = (OldLR != interval.end())
548 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
549 LiveRange LR(start, end, ValNo);
550 interval.addRange(LR);
551 interval.addKill(LR.valno, end);
552 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def at MIIdx to the virtual- or
// physical-register handler; for allocatable physregs, also process every
// sub-register the def implicitly clobbers.
// NOTE(review): the MIIdx/reg parameter line and the `CopyMI = MI;`
// assignment inside the copy check are elided in this copy -- confirm
// against upstream before editing.
555 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
556 MachineBasicBlock::iterator MI,
559 if (TargetRegisterInfo::isVirtualRegister(reg))
560 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
561 else if (allocatableRegs_[reg]) {
562 MachineInstr *CopyMI = NULL;
563 unsigned SrcReg, DstReg;
564 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
565 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
566 tii_->isMoveInstr(*MI, SrcReg, DstReg))
568 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
569 // Def of a register also defines its sub-registers.
570 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
571 // If MI also modifies the sub-register explicitly, avoid processing it
572 // more than once. Do not pass in TRI here so it checks for exact match.
573 if (!MI->modifiesRegister(*AS))
574 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live into
// MBB: scan forward from the block start until the register is killed or
// redefined; if neither happens the handling differs for aliases vs. the
// register itself (live-through vs. dead-on-entry).
// NOTE(review): the MIIdx parameter line, `goto exit;` jumps, `++mi`
// advance, the `exit:` label and the isAlias branch around lines 609-615 are
// elided in this copy -- confirm against upstream before editing.
578 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
580 LiveInterval &interval, bool isAlias) {
581 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
583 // Look for kills, if it reaches a def before it's killed, then it shouldn't
584 // be considered a livein.
585 MachineBasicBlock::iterator mi = MBB->begin();
586 unsigned baseIndex = MIIdx;
587 unsigned start = baseIndex;
588 unsigned end = start;
589 while (mi != MBB->end()) {
590 if (mi->killsRegister(interval.reg, tri_)) {
592 end = getUseIndex(baseIndex) + 1;
594 } else if (mi->modifiesRegister(interval.reg, tri_)) {
595 // Another instruction redefines the register before it is ever read.
596 // Then the register is essentially dead at the instruction that defines
597 // it. Hence its interval is:
598 // [defSlot(def), defSlot(def)+1)
600 end = getDefIndex(start) + 1;
604 baseIndex += InstrSlots::NUM;
609 // Live-in register might not be used at all.
613 end = getDefIndex(MIIdx) + 1;
615 DOUT << " live through";
620 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
621 interval.addRange(LR);
622 interval.addKill(LR.valno, end);
623 DOUT << " +" << LR << '\n';
626 /// computeIntervals - computes the live intervals for virtual
627 /// registers. for some ordering of the machine instructions [1,N] a
628 /// live interval is an interval [i, j) where 1 <= i <= j < N for
629 /// which a variable is live
// NOTE(review): loop-header continuation lines, the `true` isAlias argument
// closing of handleLiveInRegister, and several closing braces are elided in
// this copy -- confirm against upstream before editing.
630 void LiveIntervals::computeIntervals() {
631 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
632 << "********** Function: "
633 << ((Value*)mf_->getFunction())->getName() << '\n';
634 // Track the index of the current machine instr.
635 unsigned MIIndex = 0;
636 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
638 MachineBasicBlock *MBB = MBBI;
639 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
641 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
643 // Create intervals for live-ins to this BB first.
644 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
645 LE = MBB->livein_end(); LI != LE; ++LI) {
646 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
647 // Multiple live-ins can alias the same register.
648 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
649 if (!hasInterval(*AS))
650 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
654 for (; MI != miEnd; ++MI) {
655 DOUT << MIIndex << "\t" << *MI;
// Walk operands last-to-first so earlier operand indices stay valid if the
// instruction's operand list is rewritten during handling.
658 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
659 MachineOperand &MO = MI->getOperand(i);
660 // handle register defs - build intervals
661 if (MO.isRegister() && MO.getReg() && MO.isDef())
662 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
665 MIIndex += InstrSlots::NUM;
668 if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside the range [LR.start, LR.end); returns whether any was found.
// NOTE(review): the result flag declaration/update, the loop-exit `break`,
// iterator increment, and final return are elided in this copy -- confirm
// against upstream before editing.
672 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
673 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
// Idx2MBBMap is sorted by start index, so binary-search for the first
// candidate block.
674 std::vector<IdxMBBPair>::const_iterator I =
675 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
678 while (I != Idx2MBBMap.end()) {
679 if (LR.end <= I->first)
681 MBBs.push_back(I->second);
689 LiveInterval LiveIntervals::createInterval(unsigned reg) {
690 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
692 return LiveInterval(reg, Weight);
695 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
696 /// copy field and returns the source register that defines it.
// NOTE(review): the guard for a null `VNI->copy` (returning 0), the
// `return SrcReg;` after the move check, and the trailing `return 0;` are
// elided in this copy -- confirm against upstream before editing.
697 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG reads its source from operand 1; INSERT_SUBREG inserts the
// value from operand 2.
701 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
702 return VNI->copy->getOperand(1).getReg();
703 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
704 return VNI->copy->getOperand(2).getReg();
705 unsigned SrcReg, DstReg;
706 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
708 assert(0 && "Unrecognized copy instruction!");
712 //===----------------------------------------------------------------------===//
713 // Register allocator hooks.
716 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
717 /// allow one) virtual register operand, then its uses are implicitly using
718 /// the register. Returns the virtual register.
// NOTE(review): the RegOp accumulator declaration, the `continue`
// statements, the assert's first line, the `RegOp = MO.getReg();`
// assignment, and the final return are elided in this copy -- confirm
// against upstream before editing.
719 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
720 MachineInstr *MI) const {
722 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
723 MachineOperand &MO = MI->getOperand(i);
724 if (!MO.isRegister() || !MO.isUse())
726 unsigned Reg = MO.getReg();
727 if (Reg == 0 || Reg == li.reg)
729 // FIXME: For now, only remat MI with at most one register operand.
731 "Can't rematerialize instruction with multiple register operand!");
738 /// isValNoAvailableAt - Return true if the val# of the specified interval
739 /// which reaches the given instruction also reaches the specified use index.
740 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
741 unsigned UseIdx) const {
742 unsigned Index = getInstructionIndex(MI);
743 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
744 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
745 return UI != li.end() && UI->valno == ValNo;
748 /// isReMaterializable - Returns true if the definition MI of the specified
749 /// val# of the specified interval is re-materializable.
// NOTE(review): the isLoad out-parameter line, the -disable-rematerialization
// early-out, several `return true/false` statements, the FrameIdx
// declaration, the ImpUse guard, and closing braces are elided in this copy
// -- confirm against upstream before editing.
750 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
751 const VNInfo *ValNo, MachineInstr *MI,
757 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from immutable (fixed) stack slots always reload the same value.
761 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
762 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
763 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
764 // this but remember this is not safe to fold into a two-address
766 // This is a load from fixed stack slot. It can be rematerialized.
769 if (tii_->isTriviallyReMaterializable(MI)) {
770 const TargetInstrDesc &TID = MI->getDesc();
771 isLoad = TID.isSimpleLoad();
// If the def reads a virtual register, that register's value must reach
// every use of li, otherwise remat at those uses would be wrong.
773 unsigned ImpUse = getReMatImplicitUse(li, MI);
775 const LiveInterval &ImpLi = getInterval(ImpUse);
776 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
777 re = mri_->use_end(); ri != re; ++ri) {
778 MachineInstr *UseMI = &*ri;
779 unsigned UseIdx = getInstructionIndex(UseMI);
780 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
782 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
792 /// isReMaterializable - Returns true if every definition of MI of every
793 /// val# of the specified interval is re-materializable.
// NOTE(review): the loop-header continuation, the dead-val# test preceding
// the `continue`, the per-def early-return, the isLoad accumulation, and the
// final `return true;` are elided in this copy -- confirm against upstream
// before editing.
794 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
796 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
798 const VNInfo *VNI = *i;
799 unsigned DefIdx = VNI->def;
801 continue; // Dead val#.
802 // Is the def for the val# rematerializable?
805 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
806 bool DefIsLoad = false;
808 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
815 /// FilterFoldedOps - Filter out two-address use operands. Return
816 /// true if it finds any issue with the operands that ought to prevent
// folding (third doc line and the MRInfo out-parameter line are elided in
// this copy).
// NOTE(review): the MRInfo parameter line, the subreg early-return, `else`
// branches, `continue` after the tied-operand case, and the final return are
// elided in this copy -- confirm against upstream before editing.
818 static bool FilterFoldedOps(MachineInstr *MI,
819 SmallVector<unsigned, 2> &Ops,
821 SmallVector<unsigned, 2> &FoldOps) {
822 const TargetInstrDesc &TID = MI->getDesc();
825 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
826 unsigned OpIdx = Ops[i];
827 MachineOperand &MO = MI->getOperand(OpIdx);
828 // FIXME: fold subreg use.
// Record whether the folded operand set modifies and/or reads the register.
832 MRInfo |= (unsigned)VirtRegMap::isMod;
834 // Filter out two-address use operand(s).
835 if (!MO.isImplicit() &&
836 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
837 MRInfo = VirtRegMap::isModRef;
840 MRInfo |= (unsigned)VirtRegMap::isRef;
842 FoldOps.push_back(OpIdx);
848 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
849 /// slot / to reg or any rematerialized load into ith operand of specified
850 /// MI. If it is successful, MI is updated with the newly created MI and
// returns true (continuation line elided in this copy).
// NOTE(review): the InstrIdx parameter line, `return true;` after the
// implicit_def deletion, `return false;` after the filter/two-address
// checks, the `if (fmi)` guard around the success path, the numFolds
// statistic bump, and the final returns are elided in this copy -- confirm
// against upstream before editing.
852 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
853 VirtRegMap &vrm, MachineInstr *DefMI,
855 SmallVector<unsigned, 2> &Ops,
856 bool isSS, int Slot, unsigned Reg) {
857 // If it is an implicit def instruction, just delete it.
858 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
859 RemoveMachineInstrFromMaps(MI);
860 vrm.RemoveMachineInstrFromMaps(MI);
861 MI->eraseFromParent();
866 // Filter the list of operand indexes that are to be folded. Abort if
867 // any operand will prevent folding.
869 SmallVector<unsigned, 2> FoldOps;
870 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
873 // The only time it's safe to fold into a two address instruction is when
874 // it's folding reload and spill from / into a spill stack slot.
875 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat def into MI.
878 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
879 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
881 // Remember this instruction uses the spill slot.
882 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
884 // Attempt to fold the memory reference into the instruction. If
885 // we can do this, we don't need to insert spill code.
887 lv_->instructionChanged(MI, fmi);
889 fmi->copyKillDeadInfo(MI, tri_);
890 MachineBasicBlock &MBB = *MI->getParent();
891 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
892 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old instruction to the new
// one, then splice the folded instruction into the old one's place.
893 vrm.transferSpillPts(MI, fmi);
894 vrm.transferRestorePts(MI, fmi);
895 vrm.transferEmergencySpills(MI, fmi);
897 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
898 mi2iMap_[fmi] = InstrIdx;
899 MI = MBB.insert(MBB.erase(MI), fmi);
906 /// canFoldMemoryOperand - Returns true if the specified load / store
907 /// folding is possible.
// NOTE(review): the ReMat parameter line, the MRInfo declaration, and the
// `return false;` statements after each guard are elided in this copy --
// confirm against upstream before editing.
908 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
909 SmallVector<unsigned, 2> &Ops,
911 // Filter the list of operand indexes that are to be folded. Abort if
912 // any operand will prevent folding.
914 SmallVector<unsigned, 2> FoldOps;
915 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
918 // It's only legal to remat for a use, not a def.
919 if (ReMat && (MRInfo & VirtRegMap::isMod))
922 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of `li` lies inside a
// single basic block (checked against the sorted Idx2MBBMap); presumably the
// elided tail returns MBBs.size() <= 1 -- TODO confirm against upstream.
// NOTE(review): `return false;` statements after the two guards and closing
// braces are elided in this copy.
925 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
926 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
927 for (LiveInterval::Ranges::const_iterator
928 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
929 std::vector<IdxMBBPair>::const_iterator II =
930 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
931 if (II == Idx2MBBMap.end())
933 if (I->end > II->first) // crossing a MBB.
935 MBBs.insert(II->second);
942 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
943 /// interval on to-be re-materialized operands of MI) with new register.
// NOTE(review): the VirtRegMap parameter line, `continue` statements after
// the operand filters, the null-check on UseMO, and closing braces are
// elided in this copy -- confirm against upstream before editing.
944 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
945 MachineInstr *MI, unsigned NewVReg,
947 // There is an implicit use. That means one of the other operand is
948 // being remat'ed and the remat'ed instruction has li.reg as an
949 // use operand. Make sure we rewrite that as well.
950 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
951 MachineOperand &MO = MI->getOperand(i);
952 if (!MO.isRegister())
954 unsigned Reg = MO.getReg();
955 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
957 if (!vrm.isReMaterialized(Reg))
// Patch the cached remat instruction so it reads the new vreg instead of
// the interval's original register.
959 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
960 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
962 UseMO->setReg(NewVReg);
966 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
967 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
//
// rewriteInstructionForSpills rewrites the operands of one instruction MI
// that reference the spilled register li.reg so they use NewVReg instead
// (creating NewVReg when needed), records a small live range around this
// def / use for NewVReg, and tells the VirtRegMap how NewVReg will be
// materialized (re-mat id or stack slot).  The returned flag reports
// whether the eventual spill / restore could be folded into MI (CanFold).
// NOTE(review): several lines of this routine are elided in this excerpt
// (the 'continue's after the operand checks, the RestartInstruction label,
// the else-branches and some closing braces); comments below describe only
// the code that is visible.
969 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
970 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
971 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
972 unsigned Slot, int LdSlot,
973 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
975 const TargetRegisterClass* rc,
976 SmallVector<int, 4> &ReMatIds,
977 const MachineLoopInfo *loopInfo,
978 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
979 std::map<unsigned,unsigned> &MBBVRegsMap,
980 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
981 MachineBasicBlock *MBB = MI->getParent();
// Spill weights are scaled by the loop depth of MI's block (getSpillWeight).
982 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
983 bool CanFold = false;
// Walk all operands of MI looking for references to the spilled register.
// The bound is re-read each iteration because folding below can rewrite MI.
985 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
986 MachineOperand& mop = MI->getOperand(i);
// Only virtual-register operands are candidates for rewriting.
987 if (!mop.isRegister())
989 unsigned Reg = mop.getReg();
991 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
// Folding the memory access is tried only when the def is not
// rematerializable (refined below for remat defs that are loads).
996 bool TryFold = !DefIsReMat;
997 bool FoldSS = true; // Default behavior unless it's a remat.
1000 // If this is the rematerializable definition MI itself and
1001 // all of its uses are rematerialized, simply delete it.
1002 if (MI == ReMatOrigDefMI && CanDelete) {
1003 DOUT << "\t\t\t\tErasing re-materlizable def: ";
// Keep both the instruction-index maps and the VirtRegMap in sync
// with the deletion.
1005 RemoveMachineInstrFromMaps(MI);
1006 vrm.RemoveMachineInstrFromMaps(MI);
1007 MI->eraseFromParent();
1011 // If def for this use can't be rematerialized, then try folding.
1012 // If def is rematerializable and it's a load, also try folding.
1013 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1015 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1021 // Scan all of the operands of this instruction rewriting operands
1022 // to use NewVReg instead of li.reg as appropriate. We do this for
1025 // 1. If the instr reads the same spilled vreg multiple times, we
1026 // want to reuse the NewVReg.
1027 // 2. If the instr is a two-addr instruction, we are required to
1028 // keep the src/dst regs pinned.
1030 // Keep track of whether we replace a use and/or def so that we can
1031 // create the spill interval with the appropriate range.
1033 HasUse = mop.isUse();
1034 HasDef = mop.isDef();
// Collect indices of all later operands that also reference the spilled
// register so they are redirected to the same NewVReg below.
1035 SmallVector<unsigned, 2> Ops;
1037 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1038 const MachineOperand &MOj = MI->getOperand(j);
1039 if (!MOj.isRegister())
1041 unsigned RegJ = MOj.getReg();
1042 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1046 HasUse |= MOj.isUse();
1047 HasDef |= MOj.isDef();
1051 // Update stack slot spill weight if we are splitting.
1052 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1059 // Do not fold load / store here if we are splitting. We'll find an
1060 // optimal point to insert a load / store later.
1062 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1063 Ops, FoldSS, FoldSlot, Reg)) {
1064 // Folding the load/store can completely change the instruction in
1065 // unpredictable ways, rescan it from the beginning.
// If folding deleted MI outright there is nothing left to rewrite.
1069 if (isRemoved(MI)) {
1073 goto RestartInstruction;
1076 // We'll try to fold it later if it's profitable.
1077 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1081 // Create a new virtual register for the spill interval.
1082 bool CreatedNewVReg = false;
1084 NewVReg = mri_->createVirtualRegister(rc);
1086 CreatedNewVReg = true;
// Redirect this operand to the new virtual register.
1088 mop.setReg(NewVReg);
1089 if (mop.isImplicit())
1090 rewriteImplicitOps(li, MI, NewVReg, vrm);
1092 // Reuse NewVReg for other reads.
1093 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1094 MachineOperand &mopj = MI->getOperand(Ops[j]);
1095 mopj.setReg(NewVReg);
1096 if (mopj.isImplicit())
1097 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Tell the VirtRegMap how the fresh vreg will be materialized: either
// re-mat from ReMatDefMI (one remat id per val#) or via a stack slot.
1100 if (CreatedNewVReg) {
1102 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1103 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1104 // Each valnum may have its own remat id.
1105 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1107 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1109 if (!CanDelete || (HasUse && HasDef)) {
1110 // If this is a two-addr instruction then its use operands are
1111 // rematerializable but its def is not. It should be assigned a
1113 vrm.assignVirt2StackSlot(NewVReg, Slot);
1116 vrm.assignVirt2StackSlot(NewVReg, Slot);
1118 } else if (HasUse && HasDef &&
1119 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1120 // If this interval hasn't been assigned a stack slot (because earlier
1121 // def is a deleted remat def), do it now.
1122 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1123 vrm.assignVirt2StackSlot(NewVReg, Slot);
1126 // Re-matting an instruction with virtual register use. Add the
1127 // register as an implicit use on the use MI.
1128 if (DefIsReMat && ImpUse)
1129 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1131 // create a new register interval for this spill / remat.
1132 LiveInterval &nI = getOrCreateInterval(NewVReg);
1133 if (CreatedNewVReg) {
1134 NewLIs.push_back(&nI);
// Remember which vreg served this MBB so later defs / uses in the same
// block can reuse it (consulted by rewriteInstructionsForSpills).
1135 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1137 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a range around this instruction: load-to-use for
// a use on a fresh vreg, an extension of the last range on a reused one,
// and def-to-store for a def.
1141 if (CreatedNewVReg) {
1142 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1143 nI.getNextValue(~0U, 0, VNInfoAllocator));
1147 // Extend the split live interval to this def / use.
1148 unsigned End = getUseIndex(index)+1;
1149 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1150 nI.getValNumInfo(nI.getNumValNums()-1));
1156 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1157 nI.getNextValue(~0U, 0, VNInfoAllocator));
1162 DOUT << "\t\t\t\tAdded new interval: ";
1163 nI.print(DOUT, tri_);
1168 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1170 MachineBasicBlock *MBB, unsigned Idx) const {
1171 unsigned End = getMBBEndIdx(MBB);
1172 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1173 unsigned KillIdx = VNI->kills[j];
1174 if (KillIdx > Idx && KillIdx < End)
1180 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1181 /// during spilling.
// One record per (instruction, operand) reference to the spilled register:
// the instruction's numeric index, the instruction itself, and whether the
// operand reads and/or writes the register.  Consumed and sorted by
// rewriteInstructionsForSpills.
// NOTE(review): the member declarations (Index, MI, HasUse, HasDef) are
// elided in this excerpt; the constructor below shows their order and types.
1183 struct RewriteInfo {
1188 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1189 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by ascending instruction index; used with std::sort
// so rewrites are processed in program order.
1192 struct RewriteInfoCompare {
1193 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1194 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg that
/// falls inside the live range *I.  References are gathered, sorted by
/// instruction index, and rewritten one instruction at a time via
/// rewriteInstructionForSpills.  When splitting (TrySplit), the last def
/// (potential spill point) and first use (potential restore point) seen in
/// each basic block are recorded in SpillIdxes / RestoreIdxes, with the
/// matching bits set in SpillMBBs / RestoreMBBs.
// NOTE(review): a number of lines are elided in this excerpt (iterator
// advances, 'continue's, else-branches and closing braces); the comments
// below describe only the visible code.
1199 void LiveIntervals::
1200 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1201 LiveInterval::Ranges::const_iterator &I,
1202 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1203 unsigned Slot, int LdSlot,
1204 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1206 const TargetRegisterClass* rc,
1207 SmallVector<int, 4> &ReMatIds,
1208 const MachineLoopInfo *loopInfo,
1209 BitVector &SpillMBBs,
1210 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1211 BitVector &RestoreMBBs,
1212 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1213 std::map<unsigned,unsigned> &MBBVRegsMap,
1214 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1215 bool AllCanFold = true;
1216 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction boundaries.
1217 unsigned start = getBaseIndex(I->start);
1218 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1220 // First collect all the def / use in this live range that will be rewritten.
1221 // Make sure they are sorted according to instruction index.
1222 std::vector<RewriteInfo> RewriteMIs;
1223 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1224 re = mri_->reg_end(); ri != re; ) {
1225 MachineInstr *MI = &*ri;
1226 MachineOperand &O = ri.getOperand();
1228 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1229 unsigned index = getInstructionIndex(MI);
// References outside [start, end) belong to other ranges of li.
1230 if (index < start || index >= end)
1232 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1234 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// Implicit register read by the remat def, if any (0 when not re-matting).
1236 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1237 // Now rewrite the defs and uses.
1238 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1239 RewriteInfo &rwi = RewriteMIs[i];
1241 unsigned index = rwi.Index;
1242 bool MIHasUse = rwi.HasUse;
1243 bool MIHasDef = rwi.HasDef;
1244 MachineInstr *MI = rwi.MI;
1245 // If MI def and/or use the same register multiple times, then there
1246 // are multiple entries.
1247 unsigned NumUses = MIHasUse;
// Coalesce all records for the same instruction into one rewrite.
1248 while (i != e && RewriteMIs[i].MI == MI) {
1249 assert(RewriteMIs[i].Index == index);
1250 bool isUse = RewriteMIs[i].HasUse;
1251 if (isUse) ++NumUses;
1253 MIHasDef |= RewriteMIs[i].HasDef;
1256 MachineBasicBlock *MBB = MI->getParent();
1258 if (ImpUse && MI != ReMatDefMI) {
1259 // Re-matting an instruction with virtual register use. Update the
1260 // register interval's spill weight to HUGE_VALF to prevent it from
1262 LiveInterval &ImpLi = getInterval(ImpUse);
1263 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the vreg already assigned to this MBB if any.
1266 unsigned MBBId = MBB->getNumber();
1267 unsigned ThisVReg = 0;
1269 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1270 if (NVI != MBBVRegsMap.end()) {
1271 ThisVReg = NVI->second;
1278 // It's better to start a new interval to avoid artifically
1279 // extend the new interval.
1280 if (MIHasDef && !MIHasUse) {
1281 MBBVRegsMap.erase(MBB->getNumber());
1287 bool IsNew = ThisVReg == 0;
1289 // This ends the previous live interval. If all of its def / use
1290 // can be folded, give it a low spill weight.
1291 if (NewVReg && TrySplit && AllCanFold) {
1292 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef / HasUse report what was replaced.
1299 bool HasDef = false;
1300 bool HasUse = false;
1301 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1302 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1303 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1304 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1305 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1306 if (!HasDef && !HasUse)
1309 AllCanFold &= CanFold;
1311 // Update weight of spill interval.
1312 LiveInterval &nI = getOrCreateInterval(NewVReg);
1314 // The spill weight is now infinity as it cannot be spilled again.
1315 nI.weight = HUGE_VALF;
1319 // Keep track of the last def and first use in each MBB.
1321 if (MI != ReMatOrigDefMI || !CanDelete) {
1322 bool HasKill = false;
1324 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1326 // If this is a two-address code, then this index starts a new VNInfo.
1327 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1329 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record this def as the block's (tentative) spill point.
1331 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1332 SpillIdxes.find(MBBId);
1334 if (SII == SpillIdxes.end()) {
1335 std::vector<SRInfo> S;
1336 S.push_back(SRInfo(index, NewVReg, true));
1337 SpillIdxes.insert(std::make_pair(MBBId, S));
1338 } else if (SII->second.back().vreg != NewVReg) {
1339 SII->second.push_back(SRInfo(index, NewVReg, true));
1340 } else if ((int)index > SII->second.back().index) {
1341 // If there is an earlier def and this is a two-address
1342 // instruction, then it's not possible to fold the store (which
1343 // would also fold the load).
1344 SRInfo &Info = SII->second.back();
1346 Info.canFold = !HasUse;
1348 SpillMBBs.set(MBBId);
1349 } else if (SII != SpillIdxes.end() &&
1350 SII->second.back().vreg == NewVReg &&
1351 (int)index > SII->second.back().index) {
1352 // There is an earlier def that's not killed (must be two-address).
1353 // The spill is no longer needed.
1354 SII->second.pop_back();
1355 if (SII->second.empty()) {
1356 SpillIdxes.erase(MBBId);
1357 SpillMBBs.reset(MBBId);
// Use handling: decide whether the pending spill / restore in this block
// can still be folded, and record a restore point if none exists yet.
1364 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1365 SpillIdxes.find(MBBId);
1366 if (SII != SpillIdxes.end() &&
1367 SII->second.back().vreg == NewVReg &&
1368 (int)index > SII->second.back().index)
1369 // Use(s) following the last def, it's not safe to fold the spill.
1370 SII->second.back().canFold = false;
1371 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1372 RestoreIdxes.find(MBBId);
1373 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1374 // If we are splitting live intervals, only fold if it's the first
1375 // use and there isn't another use later in the MBB.
1376 RII->second.back().canFold = false;
1378 // Only need a reload if there isn't an earlier def / use.
1379 if (RII == RestoreIdxes.end()) {
1380 std::vector<SRInfo> Infos;
1381 Infos.push_back(SRInfo(index, NewVReg, true));
1382 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1384 RII->second.push_back(SRInfo(index, NewVReg, true));
1386 RestoreMBBs.set(MBBId);
1390 // Update spill weight.
1391 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1392 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Same treatment for the final interval produced by the loop above.
1395 if (NewVReg && TrySplit && AllCanFold) {
1396 // If all of its def / use can be folded, give it a low spill weight.
1397 LiveInterval &nI = getOrCreateInterval(NewVReg);
1402 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1403 BitVector &RestoreMBBs,
1404 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1405 if (!RestoreMBBs[Id])
1407 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1408 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1409 if (Restores[i].index == index &&
1410 Restores[i].vreg == vr &&
1411 Restores[i].canFold)
1416 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1417 BitVector &RestoreMBBs,
1418 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1419 if (!RestoreMBBs[Id])
1421 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1422 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1423 if (Restores[i].index == index && Restores[i].vreg)
1424 Restores[i].index = -1;
1427 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1428 /// spilled and create empty intervals for their uses.
// Any def of li.reg reaching here must be an IMPLICIT_DEF (asserted below)
// and is deleted outright; each remaining use is redirected to a brand-new
// virtual register with an empty interval, letting the allocator hand it
// any free register.
// NOTE(review): the iterator advance and the def/use branch of the loop are
// elided in this excerpt; comments describe only the visible code.
1430 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1431 const TargetRegisterClass* rc,
1432 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining operand that references the spilled register.
1433 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1434 re = mri_->reg_end(); ri != re; ) {
1435 MachineOperand &O = ri.getOperand();
1436 MachineInstr *MI = &*ri;
// Def case: the only legal remaining def is the IMPLICIT_DEF itself;
// remove it from the index maps and the VirtRegMap, then delete it.
1439 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1440 "Register def was not rewritten?");
1441 RemoveMachineInstrFromMaps(MI);
1442 vrm.RemoveMachineInstrFromMaps(MI);
1443 MI->eraseFromParent();
1445 // This must be an use of an implicit_def so it's not part of the live
1446 // interval. Create a new empty live interval for it.
1447 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1448 unsigned NewVReg = mri_->createVirtualRegister(rc);
1450 vrm.setIsImplicitlyDefined(NewVReg);
1451 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Point every operand of MI that referenced li.reg at the new vreg.
1452 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1453 MachineOperand &MO = MI->getOperand(i);
1454 if (MO.isReg() && MO.getReg() == li.reg)
// addIntervalsForSpills - The top-level spill driver: rewrites all defs and
// uses of the spilled interval li, creates the replacement intervals
// (returned to the caller), records remat information in the VirtRegMap,
// and — when splitting — picks the actual per-block spill / restore points.
// NOTE(review): many lines are elided in this excerpt (parameter lines,
// 'continue's, else-branches, closing braces and some statements); the
// comments below describe only the visible code.
1462 std::vector<LiveInterval*> LiveIntervals::
1463 addIntervalsForSpills(const LiveInterval &li,
1464 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1466 // Since this is called after the analysis is done we don't know if
1467 // LiveVariables is available
1468 lv_ = getAnalysisToUpdate<LiveVariables>();
1470 assert(li.weight != HUGE_VALF &&
1471 "attempt to spill already spilled interval!");
1473 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1474 li.print(DOUT, tri_);
1477 // Spill slot weight.
1480 // Each bit specify whether it a spill is required in the MBB.
1481 BitVector SpillMBBs(mf_->getNumBlockIDs());
1482 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1483 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1484 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1485 std::map<unsigned,unsigned> MBBVRegsMap;
1486 std::vector<LiveInterval*> NewLIs;
1487 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# remat bookkeeping: the cloned def, the original def, the remat
// id assigned by the VirtRegMap, and whether the original can be deleted.
1489 unsigned NumValNums = li.getNumValNums();
1490 SmallVector<MachineInstr*, 4> ReMatDefs;
1491 ReMatDefs.resize(NumValNums, NULL);
1492 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1493 ReMatOrigDefs.resize(NumValNums, NULL);
1494 SmallVector<int, 4> ReMatIds;
1495 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1496 BitVector ReMatDelete(NumValNums);
1497 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1499 // Spilling a split live interval. It cannot be split any further. Also,
1500 // it's also guaranteed to be a single val# / range interval.
1501 if (vrm.getPreSplitReg(li.reg)) {
1502 vrm.setIsSplitFromReg(li.reg, 0);
1503 // Unset the split kill marker on the last use.
1504 unsigned KillIdx = vrm.getKillPoint(li.reg);
1506 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1507 assert(KillMI && "Last use disappeared?");
1508 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1509 assert(KillOp != -1 && "Last use disappeared?");
1510 KillMI->getOperand(KillOp).setIsKill(false);
1512 vrm.removeKillPoint(li.reg);
// A pre-split interval already has its stack slot / remat info recorded.
1513 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1514 Slot = vrm.getStackSlot(li.reg);
1515 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1516 MachineInstr *ReMatDefMI = DefIsReMat ?
1517 vrm.getReMaterializedMI(li.reg) : NULL;
1519 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1520 bool isLoad = isLoadSS ||
1521 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1522 bool IsFirstRange = true;
1523 for (LiveInterval::Ranges::const_iterator
1524 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1525 // If this is a split live interval with multiple ranges, it means there
1526 // are two-address instructions that re-defined the value. Only the
1527 // first def can be rematerialized!
1529 // Note ReMatOrigDefMI has already been deleted.
1530 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1531 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1532 false, vrm, rc, ReMatIds, loopInfo,
1533 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1534 MBBVRegsMap, NewLIs, SSWeight);
// Later ranges cannot be rematerialized: plain stack reload.
1536 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1537 Slot, 0, false, false, false,
1538 false, vrm, rc, ReMatIds, loopInfo,
1539 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1540 MBBVRegsMap, NewLIs, SSWeight);
1542 IsFirstRange = false;
1545 SSWeight = 0.0f; // Already accounted for when split.
1546 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Normal (non-pre-split) path from here on.
1550 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1551 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
// First pass: decide, per val#, whether its def is rematerializable,
// whether the original def can be deleted, and whether a stack slot is
// needed at all.
1555 bool NeedStackSlot = false;
1556 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1558 const VNInfo *VNI = *i;
1559 unsigned VN = VNI->id;
1560 unsigned DefIdx = VNI->def;
1562 continue; // Dead val#.
1563 // Is the def for the val# rematerializable?
1564 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1565 ? 0 : getInstructionFromIndex(DefIdx);
1567 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1568 // Remember how to remat the def of this val#.
1569 ReMatOrigDefs[VN] = ReMatDefMI;
1570 // Original def may be modified so we have to make a copy here. vrm must
1572 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1574 bool CanDelete = true;
1575 if (VNI->hasPHIKill) {
1576 // A kill is a phi node, not all of its uses can be rematerialized.
1577 // It must not be deleted.
1579 // Need a stack slot if there is any live range where uses cannot be
1581 NeedStackSlot = true;
1584 ReMatDelete.set(VN);
1586 // Need a stack slot if there is any live range where uses cannot be
1588 NeedStackSlot = true;
1592 // One stack slot per live interval.
1593 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1594 Slot = vrm.assignVirt2StackSlot(li.reg);
1596 // Create new intervals and rewrite defs and uses.
1597 for (LiveInterval::Ranges::const_iterator
1598 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1599 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1600 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1601 bool DefIsReMat = ReMatDefMI != NULL;
1602 bool CanDelete = ReMatDelete[I->valno->id];
1604 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1605 bool isLoad = isLoadSS ||
1606 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1607 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1608 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1609 CanDelete, vrm, rc, ReMatIds, loopInfo,
1610 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1611 MBBVRegsMap, NewLIs, SSWeight);
1614 // Insert spills / restores if we are splitting.
1616 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Second pass (splitting only): visit each block with a recorded spill
// point, try to fold the store into the def, otherwise tell the spiller
// where to insert the spill.
1620 SmallPtrSet<LiveInterval*, 4> AddedKill;
1621 SmallVector<unsigned, 2> Ops;
1622 if (NeedStackSlot) {
1623 int Id = SpillMBBs.find_first();
1625 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1626 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1627 std::vector<SRInfo> &spills = SpillIdxes[Id];
1628 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1629 int index = spills[i].index;
1630 unsigned VReg = spills[i].vreg;
1631 LiveInterval &nI = getOrCreateInterval(VReg);
1632 bool isReMat = vrm.isReMaterialized(VReg);
1633 MachineInstr *MI = getInstructionFromIndex(index);
1634 bool CanFold = false;
1635 bool FoundUse = false;
1637 if (spills[i].canFold) {
// Gather the operands of MI that reference VReg for possible folding.
1639 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1640 MachineOperand &MO = MI->getOperand(j);
1641 if (!MO.isRegister() || MO.getReg() != VReg)
1648 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1649 RestoreMBBs, RestoreIdxes))) {
1650 // MI has two-address uses of the same register. If the use
1651 // isn't the first and only use in the BB, then we can't fold
1652 // it. FIXME: Move this to rewriteInstructionsForSpills.
1659 // Fold the store into the def if possible.
1660 bool Folded = false;
1661 if (CanFold && !Ops.empty()) {
1662 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1665 // Also folded uses, do not issue a load.
1666 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1667 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1669 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1673 // Otherwise tell the spiller to issue a spill.
1675 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1676 bool isKill = LR->end == getStoreIndex(index);
1677 if (!MI->registerDefIsDead(nI.reg))
1678 // No need to spill a dead def.
1679 vrm.addSpillPoint(VReg, isKill, MI);
1681 AddedKill.insert(&nI);
1684 // Update spill slot weight.
1686 SSWeight += getSpillWeight(true, false, loopDepth);
1688 Id = SpillMBBs.find_next(Id);
// Third pass: visit each block with a recorded restore point, try to fold
// the reload (or the remat'ed load) into the use, otherwise tell the
// spiller where to insert the restore / rematerialization.
1692 int Id = RestoreMBBs.find_first();
1694 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1695 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1697 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1698 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1699 int index = restores[i].index;
1702 unsigned VReg = restores[i].vreg;
1703 LiveInterval &nI = getOrCreateInterval(VReg);
1704 bool isReMat = vrm.isReMaterialized(VReg);
1705 MachineInstr *MI = getInstructionFromIndex(index);
1706 bool CanFold = false;
1708 if (restores[i].canFold) {
1710 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1711 MachineOperand &MO = MI->getOperand(j);
1712 if (!MO.isRegister() || MO.getReg() != VReg)
1716 // If this restore were to be folded, it would have been folded
1725 // Fold the load into the use if possible.
1726 bool Folded = false;
1727 if (CanFold && !Ops.empty()) {
1729 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1731 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1733 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1734 // If the rematerializable def is a load, also try to fold it.
1735 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1736 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1737 Ops, isLoadSS, LdSlot, VReg);
1738 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1740 // Re-matting an instruction with virtual register use. Add the
1741 // register as an implicit use on the use MI and update the register
1742 // interval's spill weight to HUGE_VALF to prevent it from being
1744 LiveInterval &ImpLi = getInterval(ImpUse);
1745 ImpLi.weight = HUGE_VALF;
1746 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1750 // If folding is not possible / failed, then tell the spiller to issue a
1751 // load / rematerialization for us.
1753 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1755 vrm.addRestorePoint(VReg, MI);
1757 // Update spill slot weight.
1759 SSWeight += getSpillWeight(false, true, loopDepth);
1761 Id = RestoreMBBs.find_next(Id);
1764 // Finalize intervals: add kills, finalize spill weights, and filter out
1766 std::vector<LiveInterval*> RetNewLIs;
1767 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1768 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's size.
1770 LI->weight /= LI->getSize();
// For intervals not already terminated by a spill, mark the last use as
// a kill and remember the kill point in the VirtRegMap.
1771 if (!AddedKill.count(LI)) {
1772 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1773 unsigned LastUseIdx = getBaseIndex(LR->end);
1774 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1775 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1776 assert(UseIdx != -1);
1777 if (LastUse->getOperand(UseIdx).isImplicit() ||
1778 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1779 LastUse->getOperand(UseIdx).setIsKill();
1780 vrm.addKillPoint(LI->reg, LastUseIdx);
1783 RetNewLIs.push_back(LI);
1787 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1791 /// hasAllocatableSuperReg - Return true if the specified physical register has
1792 /// any super register that's allocatable.
1793 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1794 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1795 if (allocatableRegs_[*AS] && hasInterval(*AS))
1800 /// getRepresentativeReg - Find the largest super register of the specified
1801 /// physical register.
1802 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1803 // Find the largest super-register that is allocatable.
1804 unsigned BestReg = Reg;
1805 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1806 unsigned SuperReg = *AS;
1807 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1815 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1816 /// specified interval that conflicts with the specified physical register.
1817 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1818 unsigned PhysReg) const {
1819 unsigned NumConflicts = 0;
1820 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1821 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1822 E = mri_->reg_end(); I != E; ++I) {
1823 MachineOperand &O = I.getOperand();
1824 MachineInstr *MI = O.getParent();
1825 unsigned Index = getInstructionIndex(MI);
1826 if (pli.liveAt(Index))
1829 return NumConflicts;
1832 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1833 /// around all defs and uses of the specified interval.
// Chooses the representative super-register SpillReg of PhysReg, records an
// emergency spill at every instruction that references li.reg while
// SpillReg is live there, and trims SpillReg's (and its live
// sub-registers') ranges around those points.
// NOTE(review): a few lines of the loop ('continue', the SeenMIs insertion,
// and closing braces) are elided in this excerpt.
1834 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1835 unsigned PhysReg, VirtRegMap &vrm) {
1836 unsigned SpillReg = getRepresentativeReg(PhysReg);
1838 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1839 // If there are registers which alias PhysReg, but which are not a
1840 // sub-register of the chosen representative super register. Assert
1841 // since we can't handle it yet.
1842 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1843 tri_->isSuperRegister(*AS, SpillReg));
1845 LiveInterval &pli = getInterval(SpillReg);
// SeenMIs guards against handling the same instruction once per operand.
1846 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1847 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1848 E = mri_->reg_end(); I != E; ++I) {
1849 MachineOperand &O = I.getOperand();
1850 MachineInstr *MI = O.getParent();
1851 if (SeenMIs.count(MI))
1854 unsigned Index = getInstructionIndex(MI);
// If SpillReg is live across this instruction, ask the spiller for an
// emergency spill and carve this slot out of its live range.
1855 if (pli.liveAt(Index)) {
1856 vrm.addEmergencySpill(SpillReg, MI);
1857 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
// Also trim any sub-register intervals live at this point.
1858 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1859 if (!hasInterval(*AS))
1861 LiveInterval &spli = getInterval(*AS);
1862 if (spli.liveAt(Index))
1863 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1869 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1870 MachineInstr* startInst) {
1871 LiveInterval& Interval = getOrCreateInterval(reg);
1872 VNInfo* VN = Interval.getNextValue(
1873 getInstructionIndex(startInst) + InstrSlots::DEF,
1874 startInst, getVNInfoAllocator());
1875 VN->hasPHIKill = true;
1876 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1877 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1878 getMBBEndIdx(startInst->getParent()) + 1, VN);
1879 Interval.addRange(LR);