1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
39 // Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization support entirely.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: split live intervals at basic-block boundaries
// (enabled by default).
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
// -split-limit: cap on interval splitting; defaults to -1 (presumably
// meaning "no limit" — confirm at the use site, which is not visible here).
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats).
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
// Pass identifier and registration under the name "liveintervals".
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare this pass's analysis dependencies: LiveVariables must run first
// (and is preserved), and PHI elimination plus two-address lowering must
// already have happened before live intervals are computed.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
64 MachineFunctionPass::getAnalysisUsage(AU);
// Drop all per-function state between runs.
67 void LiveIntervals::releaseMemory() {
72 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
73 VNInfoAllocator.Reset();
// Free the cloned instructions (loop body elided in this listing —
// presumably it deletes ClonedMIs[i]; confirm against the full file).
74 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
// Renumber every instruction and basic block, then remap all existing live
// ranges, value-number defs, and kill slots from the old numbering (saved
// in OldI2MI) into the new index space.
// NOTE(review): this is an elided listing — some original lines (braces,
// else-branches) are missing between the numbered lines below.
78 void LiveIntervals::computeNumbering() {
79 Index2MiMap OldI2MI = i2miMap_;
86 // Number MachineInstrs and MachineBasicBlocks.
87 // Initialize MBB indexes to a sentinel.
88 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
91 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
93 unsigned StartIdx = MIIndex;
// Assign each instruction a slot index, InstrSlots::NUM apart.
95 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
97 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
98 assert(inserted && "multiple MachineInstr -> index mappings");
99 i2miMap_.push_back(I);
100 MIIndex += InstrSlots::NUM;
103 // Set the MBB2IdxMap entry for this MBB.
104 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
105 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
106 : std::make_pair(StartIdx, MIIndex - 1);
107 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
109 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Remap ranges from the old numbering: recover the instruction via
// OldI2MI, then look up its new index, preserving the sub-slot offset.
111 if (!OldI2MI.empty())
112 for (iterator I = begin(), E = end(); I != E; ++I)
113 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
115 unsigned offset = LI->start % InstrSlots::NUM;
116 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
118 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
119 // FIXME: Not correct when the instruction at LI->end has
121 offset = LI->end % InstrSlots::NUM;
122 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
// Range ends past the last old instruction: clamp to the new end index.
124 LI->end = i2miMap_.size() * InstrSlots::NUM;
// Remap the value number's def slot and every recorded kill slot too.
127 VNInfo* vni = LI->valno;
128 offset = vni->def % InstrSlots::NUM;
129 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
131 for (size_t i = 0; i < vni->kills.size(); ++i) {
132 offset = vni->kills[i] % InstrSlots::NUM;
133 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
139 /// runOnMachineFunction - Register allocate the whole function
141 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache frequently used analyses and target hooks before doing any work.
143 mri_ = &mf_->getRegInfo();
144 tm_ = &fn.getTarget();
145 tri_ = tm_->getRegisterInfo();
146 tii_ = tm_->getInstrInfo();
147 lv_ = &getAnalysis<LiveVariables>();
148 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Statistics: interval counts before and after (the intervening
// computation lines are elided in this listing).
153 numIntervals += getNumIntervals();
155 DOUT << "********** INTERVALS **********\n";
156 for (iterator I = begin(), E = end(); I != E; ++I) {
157 I->second.print(DOUT, tri_);
161 numIntervalsAfter += getNumIntervals();
166 /// print - Implement the dump method.
167 void LiveIntervals::print(std::ostream &O, const Module* ) const {
168 O << "********** INTERVALS **********\n";
169 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): interval bodies go to DOUT, not the caller-supplied
// stream O — headers and instructions use O. Verify this is intentional.
170 I->second.print(DOUT, tri_);
174 O << "********** MACHINEINSTRS **********\n";
175 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
176 mbbi != mbbe; ++mbbi) {
177 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
178 for (MachineBasicBlock::iterator mii = mbbi->begin(),
179 mie = mbbi->end(); mii != mie; ++mii) {
// Each instruction is prefixed with its slot index.
180 O << getInstructionIndex(mii) << '\t' << *mii;
185 /// conflictsWithPhysRegDef - Returns true if the specified register
186 /// is defined during the duration of the specified interval.
187 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
188 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction slot covered by li's ranges.
189 for (LiveInterval::Ranges::const_iterator
190 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
191 for (unsigned index = getBaseIndex(I->start),
192 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
193 index += InstrSlots::NUM) {
194 // skip deleted instructions
195 while (index != end && !getInstructionFromIndex(index))
196 index += InstrSlots::NUM;
197 if (index == end) break;
199 MachineInstr *MI = getInstructionFromIndex(index);
// Special-case moves involving li.reg itself (consequent elided in
// this listing — confirm against the full file).
200 unsigned SrcReg, DstReg;
201 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
202 if (SrcReg == li.reg || DstReg == li.reg)
// Check every register operand; map virtual regs through the
// VirtRegMap to their assigned physreg before testing overlap.
204 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
205 MachineOperand& mop = MI->getOperand(i);
206 if (!mop.isRegister())
208 unsigned PhysReg = mop.getReg();
209 if (PhysReg == 0 || PhysReg == li.reg)
211 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
212 if (!vrm.hasPhys(PhysReg))
214 PhysReg = vrm.getPhys(PhysReg);
216 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// Debug helper: physical registers print by target name, virtual
// registers as "%regN" (the 'else' line is elided in this listing).
225 void LiveIntervals::printRegName(unsigned reg) const {
226 if (TargetRegisterInfo::isPhysicalRegister(reg))
227 cerr << tri_->getName(reg);
229 cerr << "%reg" << reg;
// Extend/create the live interval for a virtual register def at MIIdx.
// Handles three cases: the first def of the vreg, a two-address redef,
// and a phi-elimination redef.
// NOTE(review): this is an elided listing — some original lines (braces,
// else-branches, a few statements) are missing between the numbered lines.
232 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
233 MachineBasicBlock::iterator mi,
235 LiveInterval &interval) {
236 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
237 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
239 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
240 DOUT << "is a implicit_def\n";
244 // Virtual registers may be defined multiple times (due to phi
245 // elimination and 2-addr elimination). Much of what we do only has to be
246 // done once for the vreg. We use an empty interval to detect the first
247 // time we see a vreg.
248 if (interval.empty()) {
249 // Get the Idx of the defining instructions.
250 unsigned defIndex = getDefIndex(MIIdx);
// Record the copy instruction (if any) that defines this value so
// coalescing can find the source later.
252 MachineInstr *CopyMI = NULL;
253 unsigned SrcReg, DstReg;
254 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
255 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
256 tii_->isMoveInstr(*mi, SrcReg, DstReg))
258 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
260 assert(ValNo->id == 0 && "First value in interval is not 0?");
262 // Loop over all of the blocks that the vreg is defined in. There are
263 // two cases we have to handle here. The most common case is a vreg
264 // whose lifetime is contained within a basic block. In this case there
265 // will be a single kill, in MBB, which comes after the definition.
266 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
267 // FIXME: what about dead vars?
269 if (vi.Kills[0] != mi)
270 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
272 killIdx = defIndex+1;
274 // If the kill happens after the definition, we have an intra-block
276 if (killIdx > defIndex) {
277 assert(vi.AliveBlocks.none() &&
278 "Shouldn't be alive across any blocks!");
279 LiveRange LR(defIndex, killIdx, ValNo);
280 interval.addRange(LR);
281 DOUT << " +" << LR << "\n";
282 interval.addKill(ValNo, killIdx);
287 // The other case we handle is when a virtual register lives to the end
288 // of the defining block, potentially live across some blocks, then is
289 // live into some number of blocks, but gets killed. Start by adding a
290 // range that goes from this definition to the end of the defining block.
291 LiveRange NewLR(defIndex,
292 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
294 DOUT << " +" << NewLR;
295 interval.addRange(NewLR);
297 // Iterate over all of the blocks that the variable is completely
298 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
300 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
301 if (vi.AliveBlocks[i]) {
302 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
304 LiveRange LR(getMBBStartIdx(i),
305 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
307 interval.addRange(LR);
313 // Finally, this virtual register is live from the start of any killing
314 // block to the 'use' slot of the killing instruction.
315 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
316 MachineInstr *Kill = vi.Kills[i];
317 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
318 LiveRange LR(getMBBStartIdx(Kill->getParent()),
320 interval.addRange(LR);
321 interval.addKill(ValNo, killIdx);
326 // If this is the second time we see a virtual register definition, it
327 // must be due to phi elimination or two addr elimination. If this is
328 // the result of two address elimination, then the vreg is one of the
329 // def-and-use register operand.
330 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
331 // If this is a two-address definition, then we have already processed
332 // the live range. The only problem is that we didn't realize there
333 // are actually two values in the live interval. Because of this we
334 // need to take the LiveRegion that defines this register and split it
336 assert(interval.containsOneValue());
337 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
338 unsigned RedefIndex = getDefIndex(MIIdx);
340 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
341 VNInfo *OldValNo = OldLR->valno;
343 // Delete the initial value, which should be short and continuous,
344 // because the 2-addr copy must be in the same MBB as the redef.
345 interval.removeRange(DefIndex, RedefIndex);
347 // Two-address vregs should always only be redefined once. This means
348 // that at this point, there should be exactly one value number in it.
349 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
351 // The new value number (#1) is defined by the instruction we claimed
353 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
356 // Value#0 is now defined by the 2-addr instruction.
357 OldValNo->def = RedefIndex;
360 // Add the new live interval which replaces the range for the input copy.
361 LiveRange LR(DefIndex, RedefIndex, ValNo);
362 DOUT << " replace range with " << LR;
363 interval.addRange(LR);
364 interval.addKill(ValNo, RedefIndex);
366 // If this redefinition is dead, we need to add a dummy unit live
367 // range covering the def slot.
368 if (mi->registerDefIsDead(interval.reg, tri_))
369 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
372 interval.print(DOUT, tri_);
375 // Otherwise, this must be because of phi elimination. If this is the
376 // first redefinition of the vreg that we have seen, go back and change
377 // the live range in the PHI block to be a different value number.
378 if (interval.containsOneValue()) {
379 assert(vi.Kills.size() == 1 &&
380 "PHI elimination vreg should have one kill, the PHI itself!");
382 // Remove the old range that we now know has an incorrect number.
383 VNInfo *VNI = interval.getValNumInfo(0);
384 MachineInstr *Killer = vi.Kills[0];
385 unsigned Start = getMBBStartIdx(Killer->getParent());
386 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
387 DOUT << " Removing [" << Start << "," << End << "] from: ";
388 interval.print(DOUT, tri_); DOUT << "\n";
389 interval.removeRange(Start, End);
390 VNI->hasPHIKill = true;
391 DOUT << " RESULT: "; interval.print(DOUT, tri_);
393 // Replace the interval with one of a NEW value number. Note that this
394 // value number isn't actually defined by an instruction, weird huh? :)
395 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
396 DOUT << " replace range with " << LR;
397 interval.addRange(LR);
398 interval.addKill(LR.valno, End);
399 DOUT << " RESULT: "; interval.print(DOUT, tri_);
402 // In the case of PHI elimination, each variable definition is only
403 // live until the end of the block. We've already taken care of the
404 // rest of the live range.
405 unsigned defIndex = getDefIndex(MIIdx);
408 MachineInstr *CopyMI = NULL;
409 unsigned SrcReg, DstReg;
410 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
411 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
412 tii_->isMoveInstr(*mi, SrcReg, DstReg))
414 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
416 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
417 LiveRange LR(defIndex, killIndex, ValNo);
418 interval.addRange(LR);
419 interval.addKill(ValNo, killIndex);
420 ValNo->hasPHIKill = true;
// Build a live range for a physical register def at MIIdx. CopyMI is the
// defining copy instruction, or null if the def is not a copy.
428 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
429 MachineBasicBlock::iterator mi,
431 LiveInterval &interval,
432 MachineInstr *CopyMI) {
433 // A physical register cannot be live across basic block, so its
434 // lifetime must end somewhere in its defining basic block.
435 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
437 unsigned baseIndex = MIIdx;
438 unsigned start = getDefIndex(baseIndex);
439 unsigned end = start;
441 // If it is not used after definition, it is considered dead at
442 // the instruction defining it. Hence its interval is:
443 // [defSlot(def), defSlot(def)+1)
444 if (mi->registerDefIsDead(interval.reg, tri_)) {
446 end = getDefIndex(start) + 1;
450 // If it is not dead on definition, it must be killed by a
451 // subsequent instruction. Hence its interval is:
452 // [defSlot(def), useSlot(kill)+1)
453 while (++mi != MBB->end()) {
454 baseIndex += InstrSlots::NUM;
455 if (mi->killsRegister(interval.reg, tri_)) {
457 end = getUseIndex(baseIndex) + 1;
459 } else if (mi->modifiesRegister(interval.reg, tri_)) {
460 // Another instruction redefines the register before it is ever read.
461 // Then the register is essentially dead at the instruction that defines
462 // it. Hence its interval is:
463 // [defSlot(def), defSlot(def)+1)
465 end = getDefIndex(start) + 1;
470 // The only case we should have a dead physreg here without a killing or
471 // instruction where we know it's dead is if it is live-in to the function
473 assert(!CopyMI && "physreg was not killed in defining block!");
474 end = getDefIndex(start) + 1; // It's dead.
477 assert(start < end && "did not find end of interval?");
479 // Already exists? Extend old live interval.
480 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
481 VNInfo *ValNo = (OldLR != interval.end())
482 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
483 LiveRange LR(start, end, ValNo);
484 interval.addRange(LR);
485 interval.addKill(LR.valno, end);
486 DOUT << " +" << LR << '\n';
// Dispatch a register def: virtual registers go through
// handleVirtualRegisterDef; allocatable physical registers (and each of
// their sub-registers) go through handlePhysicalRegisterDef.
489 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
490 MachineBasicBlock::iterator MI,
493 if (TargetRegisterInfo::isVirtualRegister(reg))
494 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
495 else if (allocatableRegs_[reg]) {
// Record the defining copy (if any) so coalescing can use it later.
496 MachineInstr *CopyMI = NULL;
497 unsigned SrcReg, DstReg;
498 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
499 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
500 tii_->isMoveInstr(*MI, SrcReg, DstReg))
502 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
503 // Def of a register also defines its sub-registers.
504 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
505 // If MI also modifies the sub-register explicitly, avoid processing it
506 // more than once. Do not pass in TRI here so it checks for exact match.
507 if (!MI->modifiesRegister(*AS))
508 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// Build the live range for a register that is live-in to MBB: from the
// block start index to its first kill/redef, or through the whole block
// if it is never touched.
512 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
514 LiveInterval &interval, bool isAlias) {
515 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
517 // Look for kills, if it reaches a def before it's killed, then it shouldn't
518 // be considered a livein.
519 MachineBasicBlock::iterator mi = MBB->begin();
520 unsigned baseIndex = MIIdx;
521 unsigned start = baseIndex;
522 unsigned end = start;
523 while (mi != MBB->end()) {
524 if (mi->killsRegister(interval.reg, tri_)) {
526 end = getUseIndex(baseIndex) + 1;
528 } else if (mi->modifiesRegister(interval.reg, tri_)) {
529 // Another instruction redefines the register before it is ever read.
530 // Then the register is essentially dead at the instruction that defines
531 // it. Hence its interval is:
532 // [defSlot(def), defSlot(def)+1)
534 end = getDefIndex(start) + 1;
538 baseIndex += InstrSlots::NUM;
543 // Live-in register might not be used at all.
547 end = getDefIndex(MIIdx) + 1;
549 DOUT << " live through";
// The live-in value has no defining instruction (copy field is null).
554 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
555 interval.addRange(LR);
556 interval.addKill(LR.valno, end);
557 DOUT << " +" << LR << '\n';
560 /// computeIntervals - computes the live intervals for virtual
561 /// registers. for some ordering of the machine instructions [1,N] a
562 /// live interval is an interval [i, j) where 1 <= i <= j < N for
563 /// which a variable is live
564 void LiveIntervals::computeIntervals() {
565 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
566 << "********** Function: "
567 << ((Value*)mf_->getFunction())->getName() << '\n';
568 // Track the index of the current machine instr.
569 unsigned MIIndex = 0;
570 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
572 MachineBasicBlock *MBB = MBBI;
573 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
575 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
577 // Create intervals for live-ins to this BB first.
578 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
579 LE = MBB->livein_end(); LI != LE; ++LI) {
580 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
581 // Multiple live-ins can alias the same register.
582 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
583 if (!hasInterval(*AS))
584 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Then walk each instruction and process its register defs.
588 for (; MI != miEnd; ++MI) {
589 DOUT << MIIndex << "\t" << *MI;
592 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
593 MachineOperand &MO = MI->getOperand(i);
594 // handle register defs - build intervals
595 if (MO.isRegister() && MO.getReg() && MO.isDef())
596 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
599 MIIndex += InstrSlots::NUM;
// Collect into MBBs the basic blocks whose start index falls within LR
// (binary search on Idx2MBBMap, then scan forward until past LR.end).
// The return value's computation is elided in this listing.
604 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
605 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
606 std::vector<IdxMBBPair>::const_iterator I =
607 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
610 while (I != Idx2MBBMap.end()) {
611 if (LR.end <= I->first)
613 MBBs.push_back(I->second);
// Create a fresh LiveInterval for 'reg'. Physical registers get a
// different spill weight than virtuals (the weight expression itself is
// elided in this listing — confirm against the full file).
621 LiveInterval LiveIntervals::createInterval(unsigned reg) {
622 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
624 return LiveInterval(reg, Weight);
627 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
628 /// copy field and returns the source register that defines it.
629 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// Sub-register copies keep the source at a fixed operand position.
633 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
634 return VNI->copy->getOperand(1).getReg();
635 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
636 return VNI->copy->getOperand(2).getReg();
// Plain moves: let the target decode source/destination.
637 unsigned SrcReg, DstReg;
638 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
640 assert(0 && "Unrecognized copy instruction!");
644 //===----------------------------------------------------------------------===//
645 // Register allocator hooks.
648 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
649 /// allow one) virtual register operand, then its uses are implicitly using
650 /// the register. Returns the virtual register.
651 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
652 MachineInstr *MI) const {
// Scan MI's use operands for a register other than li.reg itself.
654 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
655 MachineOperand &MO = MI->getOperand(i);
656 if (!MO.isRegister() || !MO.isUse())
658 unsigned Reg = MO.getReg();
659 if (Reg == 0 || Reg == li.reg)
661 // FIXME: For now, only remat MI with at most one register operand.
663 "Can't rematerialize instruction with multiple register operand!");
670 /// isValNoAvailableAt - Return true if the val# of the specified interval
671 /// which reaches the given instruction also reaches the specified use index.
672 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
673 unsigned UseIdx) const {
// Find the value number live at MI, then check the same value number is
// the one live at UseIdx.
674 unsigned Index = getInstructionIndex(MI);
675 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
676 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
677 return UI != li.end() && UI->valno == ValNo;
680 /// isReMaterializable - Returns true if the definition MI of the specified
681 /// val# of the specified interval is re-materializable.
682 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
683 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
689 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from immutable (fixed) stack slots can always be re-executed.
693 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
694 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
695 // FIXME: Let target specific isReallyTriviallyReMaterializable determine
696 // this but remember this is not safe to fold into a two-address
698 // This is a load from fixed stack slot. It can be rematerialized.
701 if (tii_->isTriviallyReMaterializable(MI)) {
702 const TargetInstrDesc &TID = MI->getDesc();
703 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another virtual register, that
// register's value must reach every use of this val#.
705 unsigned ImpUse = getReMatImplicitUse(li, MI);
707 const LiveInterval &ImpLi = getInterval(ImpUse);
708 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
709 re = mri_->use_end(); ri != re; ++ri) {
710 MachineInstr *UseMI = &*ri;
711 unsigned UseIdx = getInstructionIndex(UseMI);
712 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
714 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
724 /// isReMaterializable - Returns true if every definition of MI of every
725 /// val# of the specified interval is re-materializable.
726 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Check every value number of the interval via the per-val# overload.
728 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
730 const VNInfo *VNI = *i;
731 unsigned DefIdx = VNI->def;
733 continue; // Dead val#.
734 // Is the def for the val# rematerializable?
737 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
738 bool DefIsLoad = false;
740 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
747 /// FilterFoldedOps - Filter out two-address use operands. Return
748 /// true if it finds any issue with the operands that ought to prevent
750 static bool FilterFoldedOps(MachineInstr *MI,
751 SmallVector<unsigned, 2> &Ops,
753 SmallVector<unsigned, 2> &FoldOps) {
754 const TargetInstrDesc &TID = MI->getDesc();
// Accumulate mod/ref info for each candidate operand while filtering.
757 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
758 unsigned OpIdx = Ops[i];
759 MachineOperand &MO = MI->getOperand(OpIdx);
760 // FIXME: fold subreg use.
764 MRInfo |= (unsigned)VirtRegMap::isMod;
766 // Filter out two-address use operand(s).
767 if (!MO.isImplicit() &&
768 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
769 MRInfo = VirtRegMap::isModRef;
772 MRInfo |= (unsigned)VirtRegMap::isRef;
774 FoldOps.push_back(OpIdx);
780 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
781 /// slot / to reg or any rematerialized load into ith operand of specified
782 /// MI. If it is successful, MI is updated with the newly created MI and
784 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
785 VirtRegMap &vrm, MachineInstr *DefMI,
787 SmallVector<unsigned, 2> &Ops,
788 bool isSS, int Slot, unsigned Reg) {
789 // If it is an implicit def instruction, just delete it.
790 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
791 RemoveMachineInstrFromMaps(MI);
792 vrm.RemoveMachineInstrFromMaps(MI);
793 MI->eraseFromParent();
798 // Filter the list of operand indexes that are to be folded. Abort if
799 // any operand will prevent folding.
801 SmallVector<unsigned, 2> FoldOps;
802 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
805 // The only time it's safe to fold into a two address instruction is when
806 // it's folding reload and spill from / into a spill stack slot.
807 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat def.
810 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
811 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
813 // Remember this instruction uses the spill slot.
814 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
816 // Attempt to fold the memory reference into the instruction. If
817 // we can do this, we don't need to insert spill code.
819 lv_->instructionChanged(MI, fmi);
821 fmi->copyKillDeadInfo(MI, tri_);
822 MachineBasicBlock &MBB = *MI->getParent();
823 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
824 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer VirtRegMap bookkeeping from the old instruction to the new.
825 vrm.transferSpillPts(MI, fmi);
826 vrm.transferRestorePts(MI, fmi);
827 vrm.transferEmergencySpills(MI, fmi);
// Update the index maps and splice the folded instruction in place.
829 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
830 mi2iMap_[fmi] = InstrIdx;
831 MI = MBB.insert(MBB.erase(MI), fmi);
838 /// canFoldMemoryOperand - Returns true if the specified load / store
839 /// folding is possible.
840 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
841 SmallVector<unsigned, 2> &Ops,
843 // Filter the list of operand indexes that are to be folded. Abort if
844 // any operand will prevent folding.
846 SmallVector<unsigned, 2> FoldOps;
847 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
850 // It's only legal to remat for a use, not a def.
851 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target.
854 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Return whether every range of 'li' lies within a single basic block
// (the final size check on MBBs is elided in this listing).
857 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
858 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
859 for (LiveInterval::Ranges::const_iterator
860 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Locate the block containing this range's start index.
861 std::vector<IdxMBBPair>::const_iterator II =
862 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
863 if (II == Idx2MBBMap.end())
865 if (I->end > II->first) // crossing a MBB.
867 MBBs.insert(II->second);
874 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
875 /// interval on to-be re-materialized operands of MI) with new register.
876 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
877 MachineInstr *MI, unsigned NewVReg,
879 // There is an implicit use. That means one of the other operand is
880 // being remat'ed and the remat'ed instruction has li.reg as an
881 // use operand. Make sure we rewrite that as well.
882 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
883 MachineOperand &MO = MI->getOperand(i);
884 if (!MO.isRegister())
886 unsigned Reg = MO.getReg();
887 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
889 if (!vrm.isReMaterialized(Reg))
// Patch the remat'ed def's use of li.reg to the replacement vreg.
891 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
892 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
894 UseMO->setReg(NewVReg);
898 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
899 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
901 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
902 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
903 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
904 unsigned Slot, int LdSlot,
905 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
907 const TargetRegisterClass* rc,
908 SmallVector<int, 4> &ReMatIds,
909 const MachineLoopInfo *loopInfo,
910 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
911 std::map<unsigned,unsigned> &MBBVRegsMap,
912 std::vector<LiveInterval*> &NewLIs) {
913 bool CanFold = false;
915 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
916 MachineOperand& mop = MI->getOperand(i);
917 if (!mop.isRegister())
919 unsigned Reg = mop.getReg();
921 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
926 bool TryFold = !DefIsReMat;
927 bool FoldSS = true; // Default behavior unless it's a remat.
930 // If this is the rematerializable definition MI itself and
931 // all of its uses are rematerialized, simply delete it.
932 if (MI == ReMatOrigDefMI && CanDelete) {
933 DOUT << "\t\t\t\tErasing re-materlizable def: ";
935 RemoveMachineInstrFromMaps(MI);
936 vrm.RemoveMachineInstrFromMaps(MI);
937 MI->eraseFromParent();
941 // If def for this use can't be rematerialized, then try folding.
942 // If def is rematerializable and it's a load, also try folding.
943 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
945 // Try fold loads (from stack slot, constant pool, etc.) into uses.
951 // Scan all of the operands of this instruction rewriting operands
952 // to use NewVReg instead of li.reg as appropriate. We do this for
955 // 1. If the instr reads the same spilled vreg multiple times, we
956 // want to reuse the NewVReg.
957 // 2. If the instr is a two-addr instruction, we are required to
958 // keep the src/dst regs pinned.
960 // Keep track of whether we replace a use and/or def so that we can
961 // create the spill interval with the appropriate range.
963 HasUse = mop.isUse();
964 HasDef = mop.isDef();
965 SmallVector<unsigned, 2> Ops;
967 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
968 const MachineOperand &MOj = MI->getOperand(j);
969 if (!MOj.isRegister())
971 unsigned RegJ = MOj.getReg();
972 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
976 HasUse |= MOj.isUse();
977 HasDef |= MOj.isDef();
982 // Do not fold load / store here if we are splitting. We'll find an
983 // optimal point to insert a load / store later.
985 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
986 Ops, FoldSS, FoldSlot, Reg)) {
987 // Folding the load/store can completely change the instruction in
988 // unpredictable ways, rescan it from the beginning.
994 goto RestartInstruction;
997 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1002 // Create a new virtual register for the spill interval.
1003 bool CreatedNewVReg = false;
1005 NewVReg = mri_->createVirtualRegister(rc);
1007 CreatedNewVReg = true;
1009 mop.setReg(NewVReg);
1010 if (mop.isImplicit())
1011 rewriteImplicitOps(li, MI, NewVReg, vrm);
1013 // Reuse NewVReg for other reads.
1014 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1015 MachineOperand &mopj = MI->getOperand(Ops[j]);
1016 mopj.setReg(NewVReg);
1017 if (mopj.isImplicit())
1018 rewriteImplicitOps(li, MI, NewVReg, vrm);
1021 if (CreatedNewVReg) {
1023 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1024 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1025 // Each valnum may have its own remat id.
1026 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1028 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1030 if (!CanDelete || (HasUse && HasDef)) {
1031 // If this is a two-addr instruction then its use operands are
1032 // rematerializable but its def is not. It should be assigned a
1034 vrm.assignVirt2StackSlot(NewVReg, Slot);
1037 vrm.assignVirt2StackSlot(NewVReg, Slot);
1039 } else if (HasUse && HasDef &&
1040 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1041 // If this interval hasn't been assigned a stack slot (because earlier
1042 // def is a deleted remat def), do it now.
1043 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1044 vrm.assignVirt2StackSlot(NewVReg, Slot);
1047 // Re-matting an instruction with virtual register use. Add the
1048 // register as an implicit use on the use MI.
1049 if (DefIsReMat && ImpUse)
1050 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1052 // create a new register interval for this spill / remat.
1053 LiveInterval &nI = getOrCreateInterval(NewVReg);
1054 if (CreatedNewVReg) {
1055 NewLIs.push_back(&nI);
1056 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1058 vrm.setIsSplitFromReg(NewVReg, li.reg);
1062 if (CreatedNewVReg) {
1063 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1064 nI.getNextValue(~0U, 0, VNInfoAllocator));
1068 // Extend the split live interval to this def / use.
1069 unsigned End = getUseIndex(index)+1;
1070 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1071 nI.getValNumInfo(nI.getNumValNums()-1));
1077 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1078 nI.getNextValue(~0U, 0, VNInfoAllocator));
1083 DOUT << "\t\t\t\tAdded new interval: ";
1084 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the val# VNI has a kill point that
/// lies inside basic block MBB strictly after index Idx, i.e.
/// Idx < kill < getMBBEndIdx(MBB).  Used to decide whether a def is still
/// live past Idx within its block.
/// NOTE(review): the VNInfo* parameter declaration is not visible in this
/// excerpt; VNI->kills is the list of kill indices recorded for that val#.
1089 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1091 MachineBasicBlock *MBB, unsigned Idx) const {
// Upper bound: index of the end of MBB.
1092 unsigned End = getMBBEndIdx(MBB);
// Scan every recorded kill of this val#; any kill after Idx but before the
// end of the block means the value is killed later in this MBB.
1093 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1094 unsigned KillIdx = VNI->kills[j];
1095 if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Walk all val#s of live interval 'li' and return the
/// one whose def index equals DefIdx (NULL if no val# is defined there).
/// File-local helper; used to locate the val# started by a two-address
/// redefinition.
1101 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1102 const VNInfo *VNI = NULL;
1103 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1104 e = li.vni_end(); i != e; ++i)
// Match on the val#'s recorded def slot.
1105 if ((*i)->def == DefIdx) {
1112 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1113 /// during spilling.
/// One entry is created per (instruction, operand) occurrence of the
/// spilled register; Index is the instruction's numbering index, and
/// HasUse / HasDef record whether that operand reads / writes the register.
1115 struct RewriteInfo {
// Capture the instruction index, the instruction, and the use/def flags of
// the operand being rewritten.
1120 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1121 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
/// RewriteInfoCompare - Strict weak ordering of RewriteInfo entries by
/// instruction index; used with std::sort so rewrites are processed in
/// program order.
1124 struct RewriteInfoCompare {
1125 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1126 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range [*I) for spilling or splitting.  All operand occurrences
/// in the range are collected, sorted by instruction index, and rewritten
/// via rewriteInstructionForSpills.  When splitting (TrySplit), the last
/// def and first use of each MBB are recorded in SpillIdxes/SpillMBBs and
/// RestoreIdxes/RestoreMBBs so spill/restore points can be placed later.
/// MBBVRegsMap remembers the replacement vreg currently in use per MBB so
/// multiple rewrites in one block can share it; new intervals are appended
/// to NewLIs.
1131 void LiveIntervals::
1132 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1133 LiveInterval::Ranges::const_iterator &I,
1134 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1135 unsigned Slot, int LdSlot,
1136 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1138 const TargetRegisterClass* rc,
1139 SmallVector<int, 4> &ReMatIds,
1140 const MachineLoopInfo *loopInfo,
1141 BitVector &SpillMBBs,
1142 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1143 BitVector &RestoreMBBs,
1144 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1145 std::map<unsigned,unsigned> &MBBVRegsMap,
1146 std::vector<LiveInterval*> &NewLIs) {
// AllCanFold stays true only while every rewritten def/use of the current
// replacement vreg was foldable into a memory operand.
1147 bool AllCanFold = true;
1148 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction boundaries.
1149 unsigned start = getBaseIndex(I->start);
1150 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1152 // First collect all the def / use in this live range that will be rewritten.
1153 // Make sure they are sorted according to instruction index.
1154 std::vector<RewriteInfo> RewriteMIs;
1155 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1156 re = mri_->reg_end(); ri != re; ) {
1157 MachineInstr *MI = &*ri;
1158 MachineOperand &O = ri.getOperand();
1160 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1161 unsigned index = getInstructionIndex(MI);
// Skip operands outside the live range being rewritten.
1162 if (index < start || index >= end)
1164 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1166 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat def itself reads a virtual register, remember it: its
// interval must be kept allocatable wherever the def is re-materialized.
1168 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1169 // Now rewrite the defs and uses.
1170 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1171 RewriteInfo &rwi = RewriteMIs[i];
1173 unsigned index = rwi.Index;
1174 bool MIHasUse = rwi.HasUse;
1175 bool MIHasDef = rwi.HasDef;
1176 MachineInstr *MI = rwi.MI;
1177 // If MI defs and/or uses the same register multiple times, then there
1178 // are multiple entries; coalesce them into one rewrite of MI.
1179 unsigned NumUses = MIHasUse;
1180 while (i != e && RewriteMIs[i].MI == MI) {
1181 assert(RewriteMIs[i].Index == index);
1182 bool isUse = RewriteMIs[i].HasUse;
1183 if (isUse) ++NumUses;
1185 MIHasDef |= RewriteMIs[i].HasDef;
1188 MachineBasicBlock *MBB = MI->getParent();
1190 if (ImpUse && MI != ReMatDefMI) {
1191 // Re-matting an instruction with virtual register use. Update the
1192 // register interval's spill weight to HUGE_VALF to prevent it from
// being spilled (its value is needed at every re-materialization point).
1194 LiveInterval &ImpLi = getInterval(ImpUse);
1195 ImpLi.weight = HUGE_VALF;
1198 unsigned MBBId = MBB->getNumber();
// ThisVReg: replacement vreg already chosen for this MBB, if any.
1199 unsigned ThisVReg = 0;
1201 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1202 if (NVI != MBBVRegsMap.end()) {
1203 ThisVReg = NVI->second;
1210 // It's better to start a new interval to avoid artificially
1211 // extending the new interval.
1212 if (MIHasDef && !MIHasUse) {
// A pure def starts a fresh value: drop the per-MBB mapping so a new
// replacement vreg is created below.
1213 MBBVRegsMap.erase(MBB->getNumber());
1219 bool IsNew = ThisVReg == 0;
1221 // This ends the previous live interval. If all of its def / use
1222 // can be folded, give it a low spill weight.
1223 if (NewVReg && TrySplit && AllCanFold) {
1224 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef/HasUse report what was actually
// replaced, CanFold whether the access could be folded into memory.
1231 bool HasDef = false;
1232 bool HasUse = false;
1233 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1234 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1235 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1236 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1237 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing was rewritten (e.g. the instruction was deleted): move on.
1238 if (!HasDef && !HasUse)
1241 AllCanFold &= CanFold;
1243 // Update weight of spill interval.
1244 LiveInterval &nI = getOrCreateInterval(NewVReg);
1246 // The spill weight is now infinity as it cannot be spilled again.
1247 nI.weight = HUGE_VALF;
1251 // Keep track of the last def and first use in each MBB.
1253 if (MI != ReMatOrigDefMI || !CanDelete) {
1254 bool HasKill = false;
1256 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1258 // If this is a two-address code, then this index starts a new VNInfo.
1259 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1261 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record this def as a potential spill point for its MBB.
1263 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1264 SpillIdxes.find(MBBId);
1266 if (SII == SpillIdxes.end()) {
1267 std::vector<SRInfo> S;
1268 S.push_back(SRInfo(index, NewVReg, true));
1269 SpillIdxes.insert(std::make_pair(MBBId, S));
1270 } else if (SII->second.back().vreg != NewVReg) {
1271 SII->second.push_back(SRInfo(index, NewVReg, true));
1272 } else if ((int)index > SII->second.back().index) {
1273 // If there is an earlier def and this is a two-address
1274 // instruction, then it's not possible to fold the store (which
1275 // would also fold the load).
1276 SRInfo &Info = SII->second.back();
1278 Info.canFold = !HasUse;
1280 SpillMBBs.set(MBBId);
1281 } else if (SII != SpillIdxes.end() &&
1282 SII->second.back().vreg == NewVReg &&
1283 (int)index > SII->second.back().index) {
1284 // There is an earlier def that's not killed (must be two-address).
1285 // The spill is no longer needed.
1286 SII->second.pop_back();
1287 if (SII->second.empty()) {
1288 SpillIdxes.erase(MBBId);
1289 SpillMBBs.reset(MBBId);
// Use handling: a use after the MBB's last recorded def means the pending
// spill cannot be folded away.
1296 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1297 SpillIdxes.find(MBBId);
1298 if (SII != SpillIdxes.end() &&
1299 SII->second.back().vreg == NewVReg &&
1300 (int)index > SII->second.back().index)
1301 // Use(s) following the last def, it's not safe to fold the spill.
1302 SII->second.back().canFold = false;
1303 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1304 RestoreIdxes.find(MBBId);
1305 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1306 // If we are splitting live intervals, only fold if it's the first
1307 // use and there isn't another use later in the MBB.
1308 RII->second.back().canFold = false;
1310 // Only need a reload if there isn't an earlier def / use.
1311 if (RII == RestoreIdxes.end()) {
1312 std::vector<SRInfo> Infos;
1313 Infos.push_back(SRInfo(index, NewVReg, true));
1314 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1316 RII->second.push_back(SRInfo(index, NewVReg, true));
1318 RestoreMBBs.set(MBBId);
1322 // Update spill weight.
1323 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1324 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Last replacement interval: if everything folded, lower its spill weight.
1327 if (NewVReg && TrySplit && AllCanFold) {
1328 // If all of its def / use can be folded, give it a low spill weight.
1329 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if basic block 'Id' has a foldable
/// restore of virtual register 'vr' recorded at instruction 'index', i.e.
/// an entry in RestoreIdxes[Id] matching (index, vr) with canFold set.
/// Used when folding a spill to check whether the matching reload can be
/// folded into the same instruction.
1334 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1335 BitVector &RestoreMBBs,
1336 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded in this MBB at all.
1337 if (!RestoreMBBs[Id])
1339 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Look for a foldable restore entry matching both index and vreg.
1340 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1341 if (Restores[i].index == index &&
1342 Restores[i].vreg == vr &&
1343 Restores[i].canFold)
1348 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1349 BitVector &RestoreMBBs,
1350 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1351 if (!RestoreMBBs[Id])
1353 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1354 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1355 if (Restores[i].index == index && Restores[i].vreg)
1356 Restores[i].index = -1;
1359 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1360 /// spilled and create empty intervals for their uses.
/// Any remaining def of li.reg must be an IMPLICIT_DEF (asserted below);
/// those are erased from the function and from the index/slot maps.  Each
/// remaining use gets a brand-new vreg with an empty interval, marked
/// implicitly-defined in the VirtRegMap, so the allocator gives it a
/// register without a reload.
1362 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1363 const TargetRegisterClass* rc,
1364 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining operand of the original register.
1365 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1366 re = mri_->reg_end(); ri != re; ) {
1367 MachineOperand &O = ri.getOperand();
1368 MachineInstr *MI = &*ri;
// Defs: only IMPLICIT_DEF may survive rewriting; delete it and drop it
// from both the instruction-index maps and the VirtRegMap.
1371 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1372 "Register def was not rewritten?");
1373 RemoveMachineInstrFromMaps(MI);
1374 vrm.RemoveMachineInstrFromMaps(MI);
1375 MI->eraseFromParent();
1377 // This must be a use of an implicit_def so it's not part of the live
1378 // interval. Create a new empty live interval for it.
1379 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1380 unsigned NewVReg = mri_->createVirtualRegister(rc);
1382 vrm.setIsImplicitlyDefined(NewVReg);
1383 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that still names li.reg.
1384 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1385 MachineOperand &MO = MI->getOperand(i);
1386 if (MO.isReg() && MO.getReg() == li.reg)
/// addIntervalsForSpills - Spill (or split) live interval li, returning the
/// new intervals created for its replacement registers.  Top-level driver:
///   1. pre-split intervals take a fast path (cannot be split further);
///   2. decide per-val# whether the def is rematerializable / deletable and
///      whether a stack slot is needed;
///   3. rewrite all defs/uses via rewriteInstructionsForSpills;
///   4. when splitting, place spills and restores using the per-MBB
///      SpillIdxes/RestoreIdxes gathered in step 3, folding them into
///      memory operands where possible;
///   5. finalize weights and kill flags on the new intervals.
1394 std::vector<LiveInterval*> LiveIntervals::
1395 addIntervalsForSpills(const LiveInterval &li,
1396 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1397 // Since this is called after the analysis is done we don't know if
1398 // LiveVariables is available
1399 lv_ = getAnalysisToUpdate<LiveVariables>();
1401 assert(li.weight != HUGE_VALF &&
1402 "attempt to spill already spilled interval!");
1404 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1405 li.print(DOUT, tri_);
1408 // Each bit specifies whether a spill is required in the MBB.
1409 BitVector SpillMBBs(mf_->getNumBlockIDs());
1410 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1411 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1412 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
// Per-MBB record of the replacement vreg in use (shared with the rewriter).
1413 std::map<unsigned,unsigned> MBBVRegsMap;
1414 std::vector<LiveInterval*> NewLIs;
1415 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping, indexed by val# id.
1417 unsigned NumValNums = li.getNumValNums();
1418 SmallVector<MachineInstr*, 4> ReMatDefs;
1419 ReMatDefs.resize(NumValNums, NULL);
1420 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1421 ReMatOrigDefs.resize(NumValNums, NULL);
1422 SmallVector<int, 4> ReMatIds;
1423 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1424 BitVector ReMatDelete(NumValNums);
1425 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1427 // Spilling a split live interval. It cannot be split any further.
1428 // It's also guaranteed to be a single val# / range interval.
1429 if (vrm.getPreSplitReg(li.reg)) {
1430 vrm.setIsSplitFromReg(li.reg, 0);
1431 // Unset the split kill marker on the last use.
1432 unsigned KillIdx = vrm.getKillPoint(li.reg);
1434 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1435 assert(KillMI && "Last use disappeared?");
1436 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1437 assert(KillOp != -1 && "Last use disappeared?");
1438 KillMI->getOperand(KillOp).setIsKill(false);
1440 vrm.removeKillPoint(li.reg);
1441 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1442 Slot = vrm.getStackSlot(li.reg);
1443 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1444 MachineInstr *ReMatDefMI = DefIsReMat ?
1445 vrm.getReMaterializedMI(li.reg) : NULL;
1447 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1448 bool isLoad = isLoadSS ||
1449 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1450 bool IsFirstRange = true;
1451 for (LiveInterval::Ranges::const_iterator
1452 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1453 // If this is a split live interval with multiple ranges, it means there
1454 // are two-address instructions that re-defined the value. Only the
1455 // first def can be rematerialized!
1457 // Note ReMatOrigDefMI has already been deleted.
1458 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1459 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1460 false, vrm, rc, ReMatIds, loopInfo,
1461 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1462 MBBVRegsMap, NewLIs);
// Later ranges of a split interval: plain spill, no remat possible.
1464 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1465 Slot, 0, false, false, false,
1466 false, vrm, rc, ReMatIds, loopInfo,
1467 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1468 MBBVRegsMap, NewLIs);
1470 IsFirstRange = false;
1473 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-pre-split path: only consider splitting when the interval spans
// multiple MBBs and the (debug) split limit has not been reached.
1477 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1478 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1482 bool NeedStackSlot = false;
// Classify each val#: is its def rematerializable, and may the original
// def be deleted once all uses are rewritten?
1483 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1485 const VNInfo *VNI = *i;
1486 unsigned VN = VNI->id;
1487 unsigned DefIdx = VNI->def;
1489 continue; // Dead val#.
1490 // Is the def for the val# rematerializable?
1491 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1492 ? 0 : getInstructionFromIndex(DefIdx);
1494 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1495 // Remember how to remat the def of this val#.
1496 ReMatOrigDefs[VN] = ReMatDefMI;
1497 // Original def may be modified so we have to make a copy here. vrm must
1499 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1501 bool CanDelete = true;
1502 if (VNI->hasPHIKill) {
1503 // A kill is a phi node, not all of its uses can be rematerialized.
1504 // It must not be deleted.
1506 // Need a stack slot if there is any live range where uses cannot be
1508 NeedStackSlot = true;
1511 ReMatDelete.set(VN);
1513 // Need a stack slot if there is any live range where uses cannot be
1515 NeedStackSlot = true;
1519 // One stack slot per live interval.
1520 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1521 Slot = vrm.assignVirt2StackSlot(li.reg);
1523 // Create new intervals and rewrite defs and uses.
1524 for (LiveInterval::Ranges::const_iterator
1525 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1526 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1527 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1528 bool DefIsReMat = ReMatDefMI != NULL;
1529 bool CanDelete = ReMatDelete[I->valno->id];
1531 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1532 bool isLoad = isLoadSS ||
1533 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1534 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1535 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1536 CanDelete, vrm, rc, ReMatIds, loopInfo,
1537 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1538 MBBVRegsMap, NewLIs);
1541 // Insert spills / restores if we are splitting.
1543 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1547 SmallPtrSet<LiveInterval*, 4> AddedKill;
1548 SmallVector<unsigned, 2> Ops;
// Place spills: walk the MBBs flagged in SpillMBBs, trying to fold each
// recorded spill point into the defining instruction.
1549 if (NeedStackSlot) {
1550 int Id = SpillMBBs.find_first();
1552 std::vector<SRInfo> &spills = SpillIdxes[Id];
1553 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1554 int index = spills[i].index;
1555 unsigned VReg = spills[i].vreg;
1556 LiveInterval &nI = getOrCreateInterval(VReg);
1557 bool isReMat = vrm.isReMaterialized(VReg);
1558 MachineInstr *MI = getInstructionFromIndex(index);
1559 bool CanFold = false;
1560 bool FoundUse = false;
1562 if (spills[i].canFold) {
// Collect the operand indices of VReg in MI for the folder.
1564 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1565 MachineOperand &MO = MI->getOperand(j);
1566 if (!MO.isRegister() || MO.getReg() != VReg)
1573 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1574 RestoreMBBs, RestoreIdxes))) {
1575 // MI has two-address uses of the same register. If the use
1576 // isn't the first and only use in the BB, then we can't fold
1577 // it. FIXME: Move this to rewriteInstructionsForSpills.
1584 // Fold the store into the def if possible.
1585 bool Folded = false;
1586 if (CanFold && !Ops.empty()) {
1587 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1590 // Also folded uses, do not issue a load.
1591 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1592 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1594 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1598 // Otherwise tell the spiller to issue a spill.
1600 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1601 bool isKill = LR->end == getStoreIndex(index);
1602 if (!MI->registerDefIsDead(nI.reg))
1603 // No need to spill a dead def.
1604 vrm.addSpillPoint(VReg, isKill, MI);
1606 AddedKill.insert(&nI);
1609 Id = SpillMBBs.find_next(Id);
// Place restores: walk the MBBs flagged in RestoreMBBs, trying to fold
// each reload (or rematerialized load) into the using instruction.
1613 int Id = RestoreMBBs.find_first();
1615 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1616 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1617 int index = restores[i].index;
1620 unsigned VReg = restores[i].vreg;
1621 LiveInterval &nI = getOrCreateInterval(VReg);
1622 MachineInstr *MI = getInstructionFromIndex(index);
1623 bool CanFold = false;
1625 if (restores[i].canFold) {
1627 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1628 MachineOperand &MO = MI->getOperand(j);
1629 if (!MO.isRegister() || MO.getReg() != VReg)
1633 // If this restore were to be folded, it would have been folded
1642 // Fold the load into the use if possible.
1643 bool Folded = false;
1644 if (CanFold && !Ops.empty()) {
1645 if (!vrm.isReMaterialized(VReg))
1646 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1648 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1650 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1651 // If the rematerializable def is a load, also try to fold it.
1652 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1653 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1654 Ops, isLoadSS, LdSlot, VReg);
1655 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1657 // Re-matting an instruction with virtual register use. Add the
1658 // register as an implicit use on the use MI and update the register
1659 // interval's spill weight to HUGE_VALF to prevent it from being
1661 LiveInterval &ImpLi = getInterval(ImpUse);
1662 ImpLi.weight = HUGE_VALF;
1663 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1667 // If folding is not possible / failed, then tell the spiller to issue a
1668 // load / rematerialization for us.
1670 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1672 vrm.addRestorePoint(VReg, MI);
1674 Id = RestoreMBBs.find_next(Id);
1677 // Finalize intervals: add kills, finalize spill weights, and filter out
// (comment continues in elided source)
1679 std::vector<LiveInterval*> RetNewLIs;
1680 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1681 LiveInterval *LI = NewLIs[i];
// Normalize accumulated weight by interval size.
1683 LI->weight /= LI->getSize();
1684 if (!AddedKill.count(LI)) {
// No spill was added for this interval: mark its last use as a kill so
// the split register's live range ends there.
1685 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1686 unsigned LastUseIdx = getBaseIndex(LR->end);
1687 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1688 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1689 assert(UseIdx != -1);
1690 if (LastUse->getOperand(UseIdx).isImplicit() ||
1691 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1692 LastUse->getOperand(UseIdx).setIsKill();
1693 vrm.addKillPoint(LI->reg, LastUseIdx);
1696 RetNewLIs.push_back(LI);
1700 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1704 /// hasAllocatableSuperReg - Return true if the specified physical register has
1705 /// any super register that's allocatable.
/// Note: the super-register must also have a live interval computed
/// (hasInterval) for it to count.
1706 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's super-register list for Reg (terminated by 0).
1707 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1708 if (allocatableRegs_[*AS] && hasInterval(*AS))
1713 /// getRepresentativeReg - Find the largest super register of the specified
1714 /// physical register.
/// Falls back to Reg itself when no qualifying super-register is found.
1715 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1716 // Find the largest super-register that is allocatable.
1717 unsigned BestReg = Reg;
1718 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1719 unsigned SuperReg = *AS;
// Pick a super-register that itself has no allocatable super-register
// (i.e. the top of the allocatable chain) and has a live interval.
1720 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1728 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1729 /// specified interval that conflicts with the specified physical register.
/// A conflict is an operand of li.reg occurring at an instruction index
/// where the representative register's live interval is live.
1730 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1731 unsigned PhysReg) const {
1732 unsigned NumConflicts = 0;
// Check liveness against the largest allocatable alias of PhysReg.
1733 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Walk every operand of li.reg in the function.
1734 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1735 E = mri_->reg_end(); I != E; ++I) {
1736 MachineOperand &O = I.getOperand();
1737 MachineInstr *MI = O.getParent();
1738 unsigned Index = getInstructionIndex(MI);
// Conflict if the physical register is live at this instruction.
1739 if (pli.liveAt(Index))
1742 return NumConflicts;
1745 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1746 /// around all defs and uses of the specified interval.
1747 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1748 unsigned PhysReg, VirtRegMap &vrm) {
1749 unsigned SpillReg = getRepresentativeReg(PhysReg);
1751 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1752 // If there are registers which alias PhysReg, but which are not a
1753 // sub-register of the chosen representative super register. Assert
1754 // since we can't handle it yet.
1755 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1756 tri_->isSuperRegister(*AS, SpillReg));
1758 LiveInterval &pli = getInterval(SpillReg);
1759 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1760 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1761 E = mri_->reg_end(); I != E; ++I) {
1762 MachineOperand &O = I.getOperand();
1763 MachineInstr *MI = O.getParent();
1764 if (SeenMIs.count(MI))
1767 unsigned Index = getInstructionIndex(MI);
1768 if (pli.liveAt(Index)) {
1769 vrm.addEmergencySpill(SpillReg, MI);
1770 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1771 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1772 if (!hasInterval(*AS))
1774 LiveInterval &spli = getInterval(*AS);
1775 if (spli.liveAt(Index))
1776 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);