//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds,     "Number of loads/stores folded into instructions");
STATISTIC(numSplits,    "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
    delete ClonedMIs[i];
}
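
// Illustrative note (not from the original source): the numbering below
// reserves InstrSlots::NUM index slots per instruction. At this revision
// LiveIntervalAnalysis.h defines the slots as
//   LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4
// so an instruction with base index I owns [I, I+4), and getLoadIndex(I),
// getUseIndex(I), getDefIndex(I) and getStoreIndex(I) return I, I+1, I+2
// and I+3 respectively.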
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
      ? std::make_pair(StartIdx, StartIdx)  // Empty MBB
      : std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator I = begin(), E = end(); I != E; ++I)
      for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
           LI != LE; ++LI) {
        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned offset = LI->start % InstrSlots::NUM;
        if (OldI2MI[LI->start / InstrSlots::NUM])
          LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
        else {
          unsigned i = 0;
          MachineInstr* newInstr = 0;
          do {
            newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
            ++i;
          } while (!newInstr);

          MachineInstr* preceding = i2miMap_[(mi2iMap_[newInstr] -
                                              InstrSlots::NUM) / InstrSlots::NUM];
          if (preceding->getParent() == newInstr->getParent() &&
              preceding->modifiesRegister(I->second.reg))
            LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
          else
            LI->start = mi2iMap_[newInstr];
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
          offset = LI->end % InstrSlots::NUM;
          if (OldI2MI[LI->end / InstrSlots::NUM])
            LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
          else {
            unsigned i = 0;
            MachineInstr* newInstr = 0;
            do {
              newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
              ++i;
            } while (!newInstr);

            LI->end = mi2iMap_[newInstr];
          }
        } else {
          LI->end = i2miMap_.size() * InstrSlots::NUM;
        }

        // Remap the VNInfo def index, which works the same as the
        // start indices above.
        VNInfo* vni = LI->valno;
        offset = vni->def % InstrSlots::NUM;
        if (OldI2MI[vni->def / InstrSlots::NUM])
          vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
        else {
          unsigned i = 0;
          MachineInstr* newInstr = 0;
          do {
            newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
            ++i;
          } while (!newInstr);

          MachineInstr* preceding = i2miMap_[(mi2iMap_[newInstr] -
                                              InstrSlots::NUM) / InstrSlots::NUM];
          if (preceding->getParent() == newInstr->getParent() &&
              preceding->modifiesRegister(I->second.reg))
            vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
          else
            vni->def = mi2iMap_[newInstr];
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          offset = vni->kills[i] % InstrSlots::NUM;
          if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
            vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
                            offset;
          else {
            unsigned e = 0;
            MachineInstr* newInstr = 0;
            do {
              newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
              ++e;
            } while (!newInstr);

            vni->kills[i] = mi2iMap_[newInstr];
          }
        }
      }
}
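
// Worked example for the remapping above (illustrative, under the assumed
// InstrSlots::NUM == 4): a live range that started at old index 18 has
// offset == 18 % 4 == 2 (its DEF slot) and old instruction number
// 18 / 4 == 4. If OldI2MI[4] survived, the new start is that instruction's
// new base index plus the same offset of 2; otherwise the do/while loops
// scan forward to the next surviving instruction.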
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
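    //
    // Illustrative example (not from the original source, assuming
    // InstrSlots::NUM == 4): if the def is the instruction at base index 12
    // and its kill is at base index 20, the range built below is
    // [getDefIndex(12), getUseIndex(20)+1) == [14, 22): from the def slot of
    // the def through the use slot of the kill.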
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);

        LiveRange LR(getMBBStartIdx(i),
                     getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
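
// Illustrative example for the two-address case above (not from the original
// source; the opcodes are hypothetical):
//   %reg1024 = MOV %reg1025        ; originally defines value #0
//   %reg1024 = ADD %reg1024, 4     ; two-address redefinition
// After the split, the copy's range [DefIndex, RedefIndex) carries the new
// value #1 (which inherits the copy's def slot and copy field), while value
// #0 is re-pointed at the ADD's def index.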
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
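  //
  // Worked example (illustrative, assuming InstrSlots::NUM == 4): for a dead
  // def at base index 8, start == getDefIndex(8) == 10 and end == 11, i.e.
  // the unit range [10, 11). getDefIndex(start) below is idempotent, since
  // start already sits in a def slot.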
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing
  // instruction (where we know it's dead) is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1; // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (TargetRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == start) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
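
// Illustrative example (not from the original source): a register live-in to
// the entry block (MIIdx == 0) whose first kill is the instruction at base
// index 12 gets the range [0, getUseIndex(12)+1) == [0, 14). If the block
// neither kills nor redefines it, end stays equal to start and the
// live-through / dead-alias fallback above picks the interval end instead.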
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                   HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
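
// Operand layout assumed by getVNInfoSourceReg (a note, per the forms these
// pseudo-instructions had at this time):
//   <dst> = EXTRACT_SUBREG <src>, <subreg-idx>         -> source is operand 1
//   <dst> = INSERT_SUBREG  <base>, <src>, <subreg-idx> -> source is operand 2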
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~0u)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
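
// Illustrative note (not from the original source): MRInfo accumulates
// VirtRegMap::isRef / VirtRegMap::isMod bits, where isModRef == isRef | isMod.
// An instruction that both reads and writes the spilled register through
// foldable operands thus reports isModRef, while a tied two-address use is
// marked isModRef but deliberately kept out of FoldOps.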
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            std::map<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << *MI;
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      // Try fold loads (from stack slot, constant pool, etc.) into uses.
      if (isLoad) {
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
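
// Illustrative note (not from the original source, using the assumed
// InstrSlots layout): a use of the spilled vreg at base index I gets the
// short reload range [getLoadIndex(I), getUseIndex(I)+1) == [I, I+2), and a
// def gets the store range [getDefIndex(I), getStoreIndex(I)) == [I+2, I+3),
// so spill intervals never reach into neighboring instructions' slots.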
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             VirtRegMap &vrm,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             std::map<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
        AllCanFold = true;
      }
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                               index, end, MI, ReMatOrigDefMI,
                                               ReMatDefMI, Slot, LdSlot, isLoad,
                                               isLoadSS, DefIsReMat, CanDelete,
                                               vrm, rc, ReMatIds, loopInfo,
                                               NewVReg, ImpUse, HasDef, HasUse,
                                               MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
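
    // Illustrative note (not from the original source): getSpillWeight scales
    // with loop depth; at this time it computed roughly
    // (isDef + isUse) * 10^loopDepth, so a def-and-use inside a doubly nested
    // loop adds about 200 to the spill interval's weight.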
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available.
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~0u)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);