1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
29 // Hidden options for help debugging.
// -disable-rematerialization: globally turn off remat of defs at spill time.
30 static cl::opt<bool> DisableReMat("disable-rematerialization",
31 cl::init(false), cl::Hidden);
// -split-intervals-at-bb / -split-limit: control interval splitting heuristics.
32 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
33 cl::init(true), cl::Hidden);
// SplitLimit < 0 means "no limit" on the number of splits performed.
34 static cl::opt<int> SplitLimit("split-limit",
35 cl::init(-1), cl::Hidden);
// Pass statistics (emitted with -stats).
36 STATISTIC(numIntervals, "Number of original intervals");
37 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
38 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
39 STATISTIC(numSplits , "Number of intervals split");
// Pass identity and registration under the name "liveintervals".
40 char LiveIntervals::ID = 0;
41 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare prerequisite passes (LiveVariables, PHI
// elimination, two-address lowering) and the analyses this pass preserves.
42 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
43 AU.addPreserved<LiveVariables>();
44 AU.addRequired<LiveVariables>();
45 AU.addPreservedID(MachineLoopInfoID);
46 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address conversion must run first: intervals are
// computed on post-PHI, two-address form machine code.
47 AU.addPreservedID(PHIEliminationID);
48 AU.addRequiredID(PHIEliminationID);
49 AU.addRequiredID(TwoAddressInstructionPassID);
// Chain to the base class so common MachineFunctionPass usage is recorded.
50 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop per-function state between runs.
// NOTE(review): this listing elides lines (embedded numbering skips 68-71,
// 75-77); the loop over ClonedMIs below is missing its body here.
51 void LiveIntervals::releaseMemory() {
52 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
53 VNInfoAllocator.Reset();
54 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
55 /// runOnMachineFunction - Register allocate the whole function
// Numbers every instruction (InstrSlots::NUM slots apart), builds the
// MBB <-> index maps, then computes intervals and dumps them under DEBUG.
// NOTE(review): listing elides several lines here (e.g. mf_ assignment,
// loop headers' continuation lines, closing braces).
56 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
57 mri_ = &mf_->getRegInfo();
58 tm_ = &fn.getTarget();
59 tri_ = tm_->getRegisterInfo();
60 tii_ = tm_->getInstrInfo();
61 lv_ = &getAnalysis<LiveVariables>();
62 allocatableRegs_ = tri_->getAllocatableSet(fn);
63 // Number MachineInstrs and MachineBasicBlocks.
64 // Initialize MBB indexes to a sentinel.
65 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
66 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
67 unsigned StartIdx = MIIndex;
68 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
// Each instruction gets a unique, strictly increasing index; both the
// instr->index and index->instr maps are kept in sync.
69 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
70 assert(inserted && "multiple MachineInstr -> index mappings");
71 i2miMap_.push_back(I);
72 MIIndex += InstrSlots::NUM;
73 // Set the MBB2IdxMap entry for this MBB.
74 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
75 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
76 : std::make_pair(StartIdx, MIIndex - 1);
77 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Idx2MBBMap must be sorted by start index so findLiveInMBBs can use
// std::lower_bound over it.
78 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
79 numIntervals += getNumIntervals();
80 DOUT << "********** INTERVALS **********\n";
81 for (iterator I = begin(), E = end(); I != E; ++I) {
82 I->second.print(DOUT, tri_);
83 numIntervalsAfter += getNumIntervals();
84 /// print - Implement the dump method.
// Dumps all live intervals followed by every machine instruction with its
// assigned index.
85 void LiveIntervals::print(std::ostream &O, const Module* ) const {
86 O << "********** INTERVALS **********\n";
87 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): the interval bodies are printed to DOUT rather than the
// ostream parameter O — looks inconsistent with the headers; confirm intent.
88 I->second.print(DOUT, tri_);
89 O << "********** MACHINEINSTRS **********\n";
90 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
91 mbbi != mbbe; ++mbbi) {
92 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
93 for (MachineBasicBlock::iterator mii = mbbi->begin(),
94 mie = mbbi->end(); mii != mie; ++mii) {
95 O << getInstructionIndex(mii) << '\t' << *mii;
96 /// conflictsWithPhysRegDef - Returns true if the specified register
97 /// is defined during the duration of the specified interval.
// Walks every instruction index covered by li's ranges, skipping deleted
// slots, and checks whether any operand maps (directly or via the current
// virt->phys assignment in vrm) to a register overlapping 'reg'.
98 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
99 VirtRegMap &vrm, unsigned reg) {
100 for (LiveInterval::Ranges::const_iterator
101 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
102 for (unsigned index = getBaseIndex(I->start),
103 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
104 index += InstrSlots::NUM) {
105 // skip deleted instructions
106 while (index != end && !getInstructionFromIndex(index))
107 index += InstrSlots::NUM;
108 if (index == end) break;
109 MachineInstr *MI = getInstructionFromIndex(index);
// Copies of the interval's own register are not conflicts.
110 unsigned SrcReg, DstReg;
111 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
112 if (SrcReg == li.reg || DstReg == li.reg)
113 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
114 MachineOperand& mop = MI->getOperand(i);
115 if (!mop.isRegister())
116 unsigned PhysReg = mop.getReg();
117 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are translated through the VirtRegMap, when assigned.
118 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
119 if (!vrm.hasPhys(PhysReg))
120 PhysReg = vrm.getPhys(PhysReg);
121 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physreg by target name, a virtual
// register as "%reg<N>".
122 void LiveIntervals::printRegName(unsigned reg) const {
123 if (TargetRegisterInfo::isPhysicalRegister(reg))
124 cerr << tri_->getName(reg);
125 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend the live interval for a virtual register
// def. Three cases: (1) first def of the vreg (empty interval), (2) a
// two-address redefinition, (3) a PHI-elimination redefinition.
// NOTE(review): this listing elides many lines (embedded numbering skips),
// including closing braces and some statements; read against upstream.
126 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
127 MachineBasicBlock::iterator mi,
128 LiveInterval &interval) {
129 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
130 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
131 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
132 DOUT << "is a implicit_def\n";
133 // Virtual registers may be defined multiple times (due to phi
134 // elimination and 2-addr elimination). Much of what we do only has to be
135 // done once for the vreg. We use an empty interval to detect the first
136 // time we see a vreg.
137 if (interval.empty()) {
138 // Get the Idx of the defining instructions.
139 unsigned defIndex = getDefIndex(MIIdx);
// Record the defining copy (if any) on the value number so coalescing can
// later identify the copy source.
140 MachineInstr *CopyMI = NULL;
141 unsigned SrcReg, DstReg;
142 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
143 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
144 tii_->isMoveInstr(*mi, SrcReg, DstReg))
145 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
146 assert(ValNo->id == 0 && "First value in interval is not 0?");
147 // Loop over all of the blocks that the vreg is defined in. There are
148 // two cases we have to handle here. The most common case is a vreg
149 // whose lifetime is contained within a basic block. In this case there
150 // will be a single kill, in MBB, which comes after the definition.
151 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
152 // FIXME: what about dead vars?
153 if (vi.Kills[0] != mi)
154 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
155 killIdx = defIndex+1;
156 // If the kill happens after the definition, we have an intra-block
157 if (killIdx > defIndex) {
158 assert(vi.AliveBlocks.none() &&
159 "Shouldn't be alive across any blocks!");
160 LiveRange LR(defIndex, killIdx, ValNo);
161 interval.addRange(LR);
162 DOUT << " +" << LR << "\n";
163 interval.addKill(ValNo, killIdx);
164 // The other case we handle is when a virtual register lives to the end
165 // of the defining block, potentially live across some blocks, then is
166 // live into some number of blocks, but gets killed. Start by adding a
167 // range that goes from this definition to the end of the defining block.
168 LiveRange NewLR(defIndex,
169 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
170 DOUT << " +" << NewLR;
171 interval.addRange(NewLR);
172 // Iterate over all of the blocks that the variable is completely
173 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
174 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
175 if (vi.AliveBlocks[i]) {
176 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
177 LiveRange LR(getMBBStartIdx(i),
178 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
179 interval.addRange(LR);
180 // Finally, this virtual register is live from the start of any killing
181 // block to the 'use' slot of the killing instruction.
182 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
183 MachineInstr *Kill = vi.Kills[i];
184 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
185 LiveRange LR(getMBBStartIdx(Kill->getParent()),
186 interval.addRange(LR);
187 interval.addKill(ValNo, killIdx);
188 // If this is the second time we see a virtual register definition, it
189 // must be due to phi elimination or two addr elimination. If this is
190 // the result of two address elimination, then the vreg is one of the
191 // def-and-use register operand.
192 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
193 // If this is a two-address definition, then we have already processed
194 // the live range. The only problem is that we didn't realize there
195 // are actually two values in the live interval. Because of this we
196 // need to take the LiveRegion that defines this register and split it
197 assert(interval.containsOneValue());
198 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
199 unsigned RedefIndex = getDefIndex(MIIdx);
200 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
201 VNInfo *OldValNo = OldLR->valno;
202 // Delete the initial value, which should be short and continuous,
203 // because the 2-addr copy must be in the same MBB as the redef.
204 interval.removeRange(DefIndex, RedefIndex);
205 // Two-address vregs should always only be redefined once. This means
206 // that at this point, there should be exactly one value number in it.
207 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
208 // The new value number (#1) is defined by the instruction we claimed
209 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
210 // Value#0 is now defined by the 2-addr instruction.
211 OldValNo->def = RedefIndex;
212 // Add the new live interval which replaces the range for the input copy.
213 LiveRange LR(DefIndex, RedefIndex, ValNo);
214 DOUT << " replace range with " << LR;
215 interval.addRange(LR);
216 interval.addKill(ValNo, RedefIndex);
217 // If this redefinition is dead, we need to add a dummy unit live
218 // range covering the def slot.
219 if (mi->registerDefIsDead(interval.reg, tri_))
220 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
221 interval.print(DOUT, tri_);
222 // Otherwise, this must be because of phi elimination. If this is the
223 // first redefinition of the vreg that we have seen, go back and change
224 // the live range in the PHI block to be a different value number.
225 if (interval.containsOneValue()) {
226 assert(vi.Kills.size() == 1 &&
227 "PHI elimination vreg should have one kill, the PHI itself!");
228 // Remove the old range that we now know has an incorrect number.
229 VNInfo *VNI = interval.getValNumInfo(0);
230 MachineInstr *Killer = vi.Kills[0];
231 unsigned Start = getMBBStartIdx(Killer->getParent());
232 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
233 DOUT << " Removing [" << Start << "," << End << "] from: ";
234 interval.print(DOUT, tri_); DOUT << "\n";
235 interval.removeRange(Start, End);
236 VNI->hasPHIKill = true;
237 DOUT << " RESULT: "; interval.print(DOUT, tri_);
238 // Replace the interval with one of a NEW value number. Note that this
239 // value number isn't actually defined by an instruction, weird huh? :)
240 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
241 DOUT << " replace range with " << LR;
242 interval.addRange(LR);
243 interval.addKill(LR.valno, End);
244 DOUT << " RESULT: "; interval.print(DOUT, tri_);
245 // In the case of PHI elimination, each variable definition is only
246 // live until the end of the block. We've already taken care of the
247 // rest of the live range.
248 unsigned defIndex = getDefIndex(MIIdx);
249 MachineInstr *CopyMI = NULL;
250 unsigned SrcReg, DstReg;
251 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
252 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
253 tii_->isMoveInstr(*mi, SrcReg, DstReg))
254 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// PHI-eliminated values live only to the end of the defining block.
255 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
256 LiveRange LR(defIndex, killIndex, ValNo);
257 interval.addRange(LR);
258 interval.addKill(ValNo, killIndex);
259 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Compute the (intra-block) live range created by
// a physreg def: from the def slot to the kill/redef, or a unit range if dead.
260 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
261 MachineBasicBlock::iterator mi,
262 LiveInterval &interval,
263 MachineInstr *CopyMI) {
264 // A physical register cannot be live across basic block, so its
265 // lifetime must end somewhere in its defining basic block.
266 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
267 unsigned baseIndex = MIIdx;
268 unsigned start = getDefIndex(baseIndex);
269 unsigned end = start;
270 // If it is not used after definition, it is considered dead at
271 // the instruction defining it. Hence its interval is:
272 // [defSlot(def), defSlot(def)+1)
273 if (mi->registerDefIsDead(interval.reg, tri_)) {
274 end = getDefIndex(start) + 1;
275 // If it is not dead on definition, it must be killed by a
276 // subsequent instruction. Hence its interval is:
277 // [defSlot(def), useSlot(kill)+1)
278 while (++mi != MBB->end()) {
279 baseIndex += InstrSlots::NUM;
280 if (mi->killsRegister(interval.reg, tri_)) {
281 end = getUseIndex(baseIndex) + 1;
282 } else if (mi->modifiesRegister(interval.reg, tri_)) {
283 // Another instruction redefines the register before it is ever read.
284 // Then the register is essentially dead at the instruction that defines
285 // it. Hence its interval is:
286 // [defSlot(def), defSlot(def)+1)
287 end = getDefIndex(start) + 1;
288 // The only case we should have a dead physreg here without a killing or
289 // instruction where we know it's dead is if it is live-in to the function
290 assert(!CopyMI && "physreg was not killed in defining block!");
291 end = getDefIndex(start) + 1; // It's dead.
292 assert(start < end && "did not find end of interval?");
293 // Already exists? Extend old live interval.
// Reuse an existing value number if the def falls in an existing range;
// otherwise mint a new one that remembers the defining copy.
294 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
295 VNInfo *ValNo = (OldLR != interval.end())
296 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
297 LiveRange LR(start, end, ValNo);
298 interval.addRange(LR);
299 interval.addKill(LR.valno, end);
300 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler; physreg defs also define their sub-registers.
301 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
302 MachineBasicBlock::iterator MI,
303 if (TargetRegisterInfo::isVirtualRegister(reg))
304 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
305 else if (allocatableRegs_[reg]) {
306 MachineInstr *CopyMI = NULL;
307 unsigned SrcReg, DstReg;
// Remember copy-like defs (moves, subreg insert/extract) for coalescing.
308 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
309 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
310 tii_->isMoveInstr(*MI, SrcReg, DstReg))
311 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
312 // Def of a register also defines its sub-registers.
313 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
314 // If MI also modifies the sub-register explicitly, avoid processing it
315 // more than once. Do not pass in TRI here so it checks for exact match.
316 if (!MI->modifiesRegister(*AS))
317 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live into MBB
// (function live-in or alias thereof): from the block start to its first
// kill/redefinition, or through the block if neither is found.
318 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
319 LiveInterval &interval, bool isAlias) {
320 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
321 // Look for kills, if it reaches a def before it's killed, then it shouldn't
322 // be considered a livein.
323 MachineBasicBlock::iterator mi = MBB->begin();
324 unsigned baseIndex = MIIdx;
325 unsigned start = baseIndex;
326 unsigned end = start;
327 while (mi != MBB->end()) {
328 if (mi->killsRegister(interval.reg, tri_)) {
329 end = getUseIndex(baseIndex) + 1;
330 } else if (mi->modifiesRegister(interval.reg, tri_)) {
331 // Another instruction redefines the register before it is ever read.
332 // Then the register is essentially dead at the instruction that defines
333 // it. Hence its interval is:
334 // [defSlot(def), defSlot(def)+1)
335 end = getDefIndex(start) + 1;
336 baseIndex += InstrSlots::NUM;
337 // Live-in register might not be used at all.
338 end = getDefIndex(MIIdx) + 1;
339 DOUT << " live through";
// The live-in value number has no defining instruction (def index = start).
340 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
341 interval.addRange(LR);
342 interval.addKill(LR.valno, end);
343 DOUT << " +" << LR << '\n';
344 /// computeIntervals - computes the live intervals for virtual
345 /// registers. for some ordering of the machine instructions [1,N] a
346 /// live interval is an interval [i, j) where 1 <= i <= j < N for
347 /// which a variable is live
348 void LiveIntervals::computeIntervals() {
349 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
350 << "********** Function: "
351 << ((Value*)mf_->getFunction())->getName() << '\n';
352 // Track the index of the current machine instr.
353 unsigned MIIndex = 0;
354 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
355 MachineBasicBlock *MBB = MBBI;
356 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
357 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
358 // Create intervals for live-ins to this BB first.
359 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
360 LE = MBB->livein_end(); LI != LE; ++LI) {
361 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
362 // Multiple live-ins can alias the same register.
363 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
364 if (!hasInterval(*AS))
365 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
366 for (; MI != miEnd; ++MI) {
367 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse so implicit defs appended by earlier handling
// are not revisited; only register defs create/extend intervals here.
368 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
369 MachineOperand &MO = MI->getOperand(i);
370 // handle register defs - build intervals
371 if (MO.isRegister() && MO.getReg() && MO.isDef())
372 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
373 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls within [LR.start, LR.end); relies on Idx2MBBMap being sorted.
374 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
375 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
376 std::vector<IdxMBBPair>::const_iterator I =
377 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
378 while (I != Idx2MBBMap.end()) {
379 if (LR.end <= I->first)
380 MBBs.push_back(I->second);
// createInterval - Construct a LiveInterval for 'reg'; physical registers get
// a distinguished spill weight (continuation of the ?: is elided here).
381 LiveInterval LiveIntervals::createInterval(unsigned reg) {
382 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
383 return LiveInterval(reg, Weight);
384 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
385 /// copy field and returns the source register that defines it.
386 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG's source is operand 1; INSERT_SUBREG's inserted value is
// operand 2; plain moves are decoded by the target hook.
387 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
388 return VNI->copy->getOperand(1).getReg();
389 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
390 return VNI->copy->getOperand(2).getReg();
391 unsigned SrcReg, DstReg;
392 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
393 assert(0 && "Unrecognized copy instruction!");
607 //===----------------------------------------------------------------------===//
608 // Register allocator hooks.
396 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
397 /// allow one) virtual register operand, then its uses are implicitly using
398 /// the register. Returns the virtual register.
399 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
400 MachineInstr *MI) const {
401 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
402 MachineOperand &MO = MI->getOperand(i);
403 if (!MO.isRegister() || !MO.isUse())
404 unsigned Reg = MO.getReg();
405 if (Reg == 0 || Reg == li.reg)
406 // FIXME: For now, only remat MI with at most one register operand.
407 "Can't rematerialize instruction with multiple register operand!");
408 /// isValNoAvailableAt - Return true if the val# of the specified interval
409 /// which reaches the given instruction also reaches the specified use index.
410 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
411 unsigned UseIdx) const {
412 unsigned Index = getInstructionIndex(MI);
// Assumes MI's index lies inside li (FindLiveRangeContaining not null-checked).
413 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
414 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
415 return UI != li.end() && UI->valno == ValNo;
416 /// isReMaterializable - Returns true if the definition MI of the specified
417 /// val# of the specified interval is re-materializable.
418 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
419 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
420 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
421 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
422 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
423 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
424 // this but remember this is not safe to fold into a two-address
425 // instruction.
426 // This is a load from fixed stack slot. It can be rematerialized.
427 if (tii_->isTriviallyReMaterializable(MI)) {
428 const TargetInstrDesc &TID = MI->getDesc();
429 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another vreg, that vreg's value must be
// available at every use point of li, otherwise remat is unsafe.
430 unsigned ImpUse = getReMatImplicitUse(li, MI);
431 const LiveInterval &ImpLi = getInterval(ImpUse);
432 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
433 re = mri_->use_end(); ri != re; ++ri) {
434 MachineInstr *UseMI = &*ri;
435 unsigned UseIdx = getInstructionIndex(UseMI);
436 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
437 /// isReMaterializable - Returns true if every definition of MI of every
438 /// val# of the specified interval is re-materializable.
439 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
440 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
441 const VNInfo *VNI = *i;
442 unsigned DefIdx = VNI->def;
443 continue; // Dead val#.
444 // Is the def for the val# rematerializable?
// Every non-dead val#'s defining instruction must itself pass the
// per-val# isReMaterializable check; isLoad accumulates across them.
445 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
446 bool DefIsLoad = false;
447 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
448 /// FilterFoldedOps - Filter out two-address use operands. Return
449 /// true if it finds any issue with the operands that ought to prevent
450 /// folding.
451 static bool FilterFoldedOps(MachineInstr *MI,
452 SmallVector<unsigned, 2> &Ops,
453 SmallVector<unsigned, 2> &FoldOps) {
454 const TargetInstrDesc &TID = MI->getDesc();
455 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
456 unsigned OpIdx = Ops[i];
457 MachineOperand &MO = MI->getOperand(OpIdx);
458 // FIXME: fold subreg use.
459 MRInfo |= (unsigned)VirtRegMap::isMod;
460 // Filter out two-address use operand(s).
// A tied (TIED_TO) non-implicit use is both read and written; record
// isModRef and do not add it to FoldOps.
461 if (!MO.isImplicit() &&
462 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
463 MRInfo = VirtRegMap::isModRef;
464 MRInfo |= (unsigned)VirtRegMap::isRef;
465 FoldOps.push_back(OpIdx);
465 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
466 /// slot / to reg or any rematerialized load into ith operand of specified
467 /// MI. If it is successful, MI is updated with the newly created MI and
// returns true (continuation elided in this listing).
468 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
469 VirtRegMap &vrm, MachineInstr *DefMI,
470 SmallVector<unsigned, 2> &Ops,
471 bool isSS, int Slot, unsigned Reg) {
472 // If it is an implicit def instruction, just delete it.
473 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
474 RemoveMachineInstrFromMaps(MI);
475 vrm.RemoveMachineInstrFromMaps(MI);
476 MI->eraseFromParent();
477 // Filter the list of operand indexes that are to be folded. Abort if
478 // any operand will prevent folding.
479 SmallVector<unsigned, 2> FoldOps;
480 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
481 // The only time it's safe to fold into a two address instruction is when
482 // it's folding reload and spill from / into a spill stack slot.
483 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (isSS) or the remat def instruction itself.
484 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
485 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
486 // Remember this instruction uses the spill slot.
487 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
488 // Attempt to fold the memory reference into the instruction. If
489 // we can do this, we don't need to insert spill code.
490 lv_->instructionChanged(MI, fmi);
491 fmi->copyKillDeadInfo(MI, tri_);
492 MachineBasicBlock &MBB = *MI->getParent();
493 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
494 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
495 vrm.transferSpillPts(MI, fmi);
496 vrm.transferRestorePts(MI, fmi);
497 vrm.transferEmergencySpills(MI, fmi);
// Swap the new instruction into both index maps and into the block,
// replacing MI in place.
498 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
499 mi2iMap_[fmi] = InstrIdx;
500 MI = MBB.insert(MBB.erase(MI), fmi);
501 /// canFoldMemoryOperand - Returns true if the specified load / store
502 /// folding is possible.
503 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
504 SmallVector<unsigned, 2> &Ops,
505 // Filter the list of operand indexes that are to be folded. Abort if
506 // any operand will prevent folding.
507 SmallVector<unsigned, 2> FoldOps;
508 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
509 // It's only legal to remat for a use, not a def.
510 if (ReMat && (MRInfo & VirtRegMap::isMod))
511 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all ranges of li lie within a single
// basic block (uses the sorted Idx2MBBMap to locate each range's block).
512 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
513 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
514 for (LiveInterval::Ranges::const_iterator
515 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
516 std::vector<IdxMBBPair>::const_iterator II =
517 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
518 if (II == Idx2MBBMap.end())
519 if (I->end > II->first) // crossing a MBB.
520 MBBs.insert(II->second);
521 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
522 /// interval on to-be re-materialized operands of MI) with new register.
523 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
524 MachineInstr *MI, unsigned NewVReg,
525 // There is an implicit use. That means one of the other operand is
526 // being remat'ed and the remat'ed instruction has li.reg as an
527 // use operand. Make sure we rewrite that as well.
528 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
529 MachineOperand &MO = MI->getOperand(i);
530 if (!MO.isRegister())
531 unsigned Reg = MO.getReg();
532 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
533 if (!vrm.isReMaterialized(Reg))
// Patch the cached remat'ed instruction's use of li.reg to NewVReg.
534 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
535 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
536 UseMO->setReg(NewVReg);
537 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
538 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// For one instruction MI inside [index, end): possibly delete a fully-remat'ed
// def, try folding the reload/spill into MI, otherwise rewrite the operands to
// a fresh vreg and record the new short interval in NewLIs.
// NOTE(review): this listing elides many lines (embedded numbering skips),
// including the RestartInstruction label and several closing braces.
539 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
540 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
541 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
542 unsigned Slot, int LdSlot,
543 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
544 const TargetRegisterClass* rc,
545 SmallVector<int, 4> &ReMatIds,
546 const MachineLoopInfo *loopInfo,
547 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
548 std::map<unsigned,unsigned> &MBBVRegsMap,
549 std::vector<LiveInterval*> &NewLIs) {
550 bool CanFold = false;
551 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
552 MachineOperand& mop = MI->getOperand(i);
553 if (!mop.isRegister())
554 unsigned Reg = mop.getReg();
555 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
556 bool TryFold = !DefIsReMat;
557 bool FoldSS = true; // Default behavior unless it's a remat.
558 // If this is the rematerializable definition MI itself and
559 // all of its uses are rematerialized, simply delete it.
560 if (MI == ReMatOrigDefMI && CanDelete) {
561 DOUT << "\t\t\t\tErasing re-materlizable def: ";
562 RemoveMachineInstrFromMaps(MI);
563 vrm.RemoveMachineInstrFromMaps(MI);
564 MI->eraseFromParent();
565 // If def for this use can't be rematerialized, then try folding.
566 // If def is rematerializable and it's a load, also try folding.
567 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
568 // Try fold loads (from stack slot, constant pool, etc.) into uses.
569 // Scan all of the operands of this instruction rewriting operands
570 // to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons (continuation elided):
571 // 1. If the instr reads the same spilled vreg multiple times, we
572 // want to reuse the NewVReg.
573 // 2. If the instr is a two-addr instruction, we are required to
574 // keep the src/dst regs pinned.
575 // Keep track of whether we replace a use and/or def so that we can
576 // create the spill interval with the appropriate range.
577 HasUse = mop.isUse();
578 HasDef = mop.isDef();
579 SmallVector<unsigned, 2> Ops;
// Gather all other operands of MI that reference the same vreg so they
// are rewritten together and folding sees the full set.
580 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
581 const MachineOperand &MOj = MI->getOperand(j);
582 if (!MOj.isRegister())
583 unsigned RegJ = MOj.getReg();
584 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
585 HasUse |= MOj.isUse();
586 HasDef |= MOj.isDef();
587 // Do not fold load / store here if we are splitting. We'll find an
588 // optimal point to insert a load / store later.
589 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
590 Ops, FoldSS, FoldSlot, Reg)) {
591 // Folding the load/store can completely change the instruction in
592 // unpredictable ways, rescan it from the beginning.
593 goto RestartInstruction;
594 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
595 // Create a new virtual register for the spill interval.
596 bool CreatedNewVReg = false;
597 NewVReg = mri_->createVirtualRegister(rc);
598 CreatedNewVReg = true;
599 if (mop.isImplicit())
600 rewriteImplicitOps(li, MI, NewVReg, vrm);
601 // Reuse NewVReg for other reads.
602 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
603 MachineOperand &mopj = MI->getOperand(Ops[j]);
604 mopj.setReg(NewVReg);
605 if (mopj.isImplicit())
606 rewriteImplicitOps(li, MI, NewVReg, vrm);
607 if (CreatedNewVReg) {
608 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
609 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
610 // Each valnum may have its own remat id.
611 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
612 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
613 if (!CanDelete || (HasUse && HasDef)) {
614 // If this is a two-addr instruction then its use operands are
615 // rematerializable but its def is not. It should be assigned a
// stack slot (continuation elided).
616 vrm.assignVirt2StackSlot(NewVReg, Slot);
617 vrm.assignVirt2StackSlot(NewVReg, Slot);
618 } else if (HasUse && HasDef &&
619 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
620 // If this interval hasn't been assigned a stack slot (because earlier
621 // def is a deleted remat def), do it now.
622 assert(Slot != VirtRegMap::NO_STACK_SLOT);
623 vrm.assignVirt2StackSlot(NewVReg, Slot);
624 // Re-matting an instruction with virtual register use. Add the
625 // register as an implicit use on the use MI.
626 if (DefIsReMat && ImpUse)
627 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
628 // create a new register interval for this spill / remat.
629 LiveInterval &nI = getOrCreateInterval(NewVReg);
630 if (CreatedNewVReg) {
631 NewLIs.push_back(&nI);
632 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
633 vrm.setIsSplitFromReg(NewVReg, li.reg);
// New spill interval covers just the load slot to this use...
634 if (CreatedNewVReg) {
635 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
636 nI.getNextValue(~0U, 0, VNInfoAllocator));
637 // Extend the split live interval to this def / use.
638 unsigned End = getUseIndex(index)+1;
639 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
640 nI.getValNumInfo(nI.getNumValNums()-1));
// ...and for a def, from the def slot to the store slot.
641 LiveRange LR(getDefIndex(index), getStoreIndex(index),
642 nI.getNextValue(~0U, 0, VNInfoAllocator));
643 DOUT << "\t\t\t\tAdded new interval: ";
644 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a kill that
/// falls inside MBB strictly after index Idx and before the block's end
/// index.  NOTE(review): the VNInfo* parameter ('VNI', used below) is
/// declared on a line not visible in this excerpt -- confirm against the
/// full signature in the header.
1052 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1054 MachineBasicBlock *MBB, unsigned Idx) const {
1055 unsigned End = getMBBEndIdx(MBB);
// Walk the recorded kill indices of this value number.
1056 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1057 unsigned KillIdx = VNI->kills[j];
// A kill strictly between Idx and the end of the MBB qualifies.
1058 if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Linear-scan the interval's value numbers and return
/// the one whose def index equals DefIdx (NULL-initialized result; the
/// return path is on lines elided from this excerpt).
1064 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1065 const VNInfo *VNI = NULL;
1066 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1067 e = li.vni_end(); i != e; ++i)
// Match on the exact def slot of the value number.
1068 if ((*i)->def == DefIdx) {
1075 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1076 /// during spilling.
// Fields (Index, MI, HasUse, HasDef) are declared on lines elided from this
// excerpt; the constructor below initializes all four.
1078 struct RewriteInfo {
1083 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1084 : Index(i), MI(mi), HasUse(u), HasDef(d) {}

/// RewriteInfoCompare - Strict-weak ordering by instruction index, used to
/// sort RewriteInfo records so defs/uses are processed in program order.
1087 struct RewriteInfoCompare {
1088 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1089 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def/use of li.reg inside the
/// live range [I->start, I->end) for spilling.  Collects all affected
/// instructions, sorts them by index, rewrites each via
/// rewriteInstructionForSpills (possibly assigning a fresh NewVReg per MBB
/// when splitting), and records per-MBB spill points (SpillIdxes/SpillMBBs)
/// and restore points (RestoreIdxes/RestoreMBBs) for the spiller.
/// NOTE(review): this excerpt is missing a number of original lines
/// (closing braces, 'continue'/'break' statements and some conditions);
/// comments below describe only what the visible lines establish.
1094 void LiveIntervals::
1095 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1096 LiveInterval::Ranges::const_iterator &I,
1097 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1098 unsigned Slot, int LdSlot,
1099 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1101 const TargetRegisterClass* rc,
1102 SmallVector<int, 4> &ReMatIds,
1103 const MachineLoopInfo *loopInfo,
1104 BitVector &SpillMBBs,
1105 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1106 BitVector &RestoreMBBs,
1107 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1108 std::map<unsigned,unsigned> &MBBVRegsMap,
1109 std::vector<LiveInterval*> &NewLIs) {
1110 bool AllCanFold = true;
1111 unsigned NewVReg = 0;
// Round the range down/up to whole instruction slots.
1112 unsigned start = getBaseIndex(I->start);
1113 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1115 // First collect all the def / use in this live range that will be rewritten.
1116 // Make sure they are sorted according to instruction index.
1117 std::vector<RewriteInfo> RewriteMIs;
1118 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1119 re = mri_->reg_end(); ri != re; ) {
1120 MachineInstr *MI = &*ri;
1121 MachineOperand &O = ri.getOperand();
1123 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1124 unsigned index = getInstructionIndex(MI);
// Only instructions whose index falls inside [start, end) are rewritten.
1125 if (index < start || index >= end)
1127 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1129 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

// If the def is rematerializable and reads a virtual register, remember it
// so uses of the remat'ed value keep that register alive.
1131 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1132 // Now rewrite the defs and uses.
1133 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1134 RewriteInfo &rwi = RewriteMIs[i];
1136 unsigned index = rwi.Index;
1137 bool MIHasUse = rwi.HasUse;
1138 bool MIHasDef = rwi.HasDef;
1139 MachineInstr *MI = rwi.MI;
1140 // If MI def and/or use the same register multiple times, then there
1141 // are multiple entries.
1142 unsigned NumUses = MIHasUse;
// Coalesce all RewriteInfo entries belonging to the same instruction.
1143 while (i != e && RewriteMIs[i].MI == MI) {
1144 assert(RewriteMIs[i].Index == index);
1145 bool isUse = RewriteMIs[i].HasUse;
1146 if (isUse) ++NumUses;
1148 MIHasDef |= RewriteMIs[i].HasDef;
1151 MachineBasicBlock *MBB = MI->getParent();

1153 if (ImpUse && MI != ReMatDefMI) {
1154 // Re-matting an instruction with virtual register use. Update the
1155 // register interval's spill weight to HUGE_VALF to prevent it from
1157 LiveInterval &ImpLi = getInterval(ImpUse);
1158 ImpLi.weight = HUGE_VALF;

// When splitting, reuse the vreg previously created for this MBB if any.
1161 unsigned MBBId = MBB->getNumber();
1162 unsigned ThisVReg = 0;
1164 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1165 if (NVI != MBBVRegsMap.end()) {
1166 ThisVReg = NVI->second;
1173 // It's better to start a new interval to avoid artifically
1174 // extend the new interval.
1175 if (MIHasDef && !MIHasUse) {
1176 MBBVRegsMap.erase(MBB->getNumber());

1182 bool IsNew = ThisVReg == 0;
1184 // This ends the previous live interval. If all of its def / use
1185 // can be folded, give it a low spill weight.
1186 if (NewVReg && TrySplit && AllCanFold) {
1187 LiveInterval &nI = getOrCreateInterval(NewVReg);

// Rewrite this instruction; HasDef/HasUse report what was actually touched.
1194 bool HasDef = false;
1195 bool HasUse = false;
1196 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1197 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1198 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1199 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1200 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1201 if (!HasDef && !HasUse)
1204 AllCanFold &= CanFold;

1206 // Update weight of spill interval.
1207 LiveInterval &nI = getOrCreateInterval(NewVReg);
1209 // The spill weight is now infinity as it cannot be spilled again.
1210 nI.weight = HUGE_VALF;

1214 // Keep track of the last def and first use in each MBB.
1216 if (MI != ReMatOrigDefMI || !CanDelete) {
1217 bool HasKill = false;
1219 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1221 // If this is a two-address code, then this index starts a new VNInfo.
1222 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1224 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or update) the pending spill for this MBB.
1226 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1227 SpillIdxes.find(MBBId);
1229 if (SII == SpillIdxes.end()) {
1230 std::vector<SRInfo> S;
1231 S.push_back(SRInfo(index, NewVReg, true));
1232 SpillIdxes.insert(std::make_pair(MBBId, S));
1233 } else if (SII->second.back().vreg != NewVReg) {
1234 SII->second.push_back(SRInfo(index, NewVReg, true));
1235 } else if ((int)index > SII->second.back().index) {
1236 // If there is an earlier def and this is a two-address
1237 // instruction, then it's not possible to fold the store (which
1238 // would also fold the load).
1239 SRInfo &Info = SII->second.back();
1241 Info.canFold = !HasUse;
1243 SpillMBBs.set(MBBId);
1244 } else if (SII != SpillIdxes.end() &&
1245 SII->second.back().vreg == NewVReg &&
1246 (int)index > SII->second.back().index) {
1247 // There is an earlier def that's not killed (must be two-address).
1248 // The spill is no longer needed.
1249 SII->second.pop_back();
1250 if (SII->second.empty()) {
1251 SpillIdxes.erase(MBBId);
1252 SpillMBBs.reset(MBBId);

// Use path: restrict folding of an earlier spill/restore in this MBB.
1259 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1260 SpillIdxes.find(MBBId);
1261 if (SII != SpillIdxes.end() &&
1262 SII->second.back().vreg == NewVReg &&
1263 (int)index > SII->second.back().index)
1264 // Use(s) following the last def, it's not safe to fold the spill.
1265 SII->second.back().canFold = false;
1266 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1267 RestoreIdxes.find(MBBId);
1268 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1269 // If we are splitting live intervals, only fold if it's the first
1270 // use and there isn't another use later in the MBB.
1271 RII->second.back().canFold = false;
1273 // Only need a reload if there isn't an earlier def / use.
1274 if (RII == RestoreIdxes.end()) {
1275 std::vector<SRInfo> Infos;
1276 Infos.push_back(SRInfo(index, NewVReg, true));
1277 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1279 RII->second.push_back(SRInfo(index, NewVReg, true));
1281 RestoreMBBs.set(MBBId);

1285 // Update spill weight.
1286 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1287 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);

// Handle the final interval created by the loop above.
1290 if (NewVReg && TrySplit && AllCanFold) {
1291 // If all of its def / use can be folded, give it a low spill weight.
1292 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return whether a foldable restore is recorded for
/// virtual register vr at instruction 'index' in the MBB numbered Id.
/// Checks both the RestoreMBBs bit and the per-MBB SRInfo list; the
/// return statements are on lines elided from this excerpt.
1297 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1298 BitVector &RestoreMBBs,
1299 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded anywhere in this MBB.
1300 if (!RestoreMBBs[Id])
1302 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1303 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// Must match instruction index, virtual register, and be foldable.
1304 if (Restores[i].index == index &&
1305 Restores[i].vreg == vr &&
1306 Restores[i].canFold)
1311 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1312 BitVector &RestoreMBBs,
1313 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1314 if (!RestoreMBBs[Id])
1316 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1317 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1318 if (Restores[i].index == index && Restores[i].vreg)
1319 Restores[i].index = -1;
1322 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1323 /// spilled and create empty intervals for their uses.
// Walks every def/use of li.reg: IMPLICIT_DEF defs are erased from the
// function and from the index/slot maps; each remaining use gets a brand
// new vreg with an empty interval (marked implicitly-defined in the
// VirtRegMap) and has its operands of li.reg rewritten.  Several loop
// control lines are elided from this excerpt.
1325 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1326 const TargetRegisterClass* rc,
1327 std::vector<LiveInterval*> &NewLIs) {
1328 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1329 re = mri_->reg_end(); ri != re; ) {
1330 MachineOperand &O = ri.getOperand();
1331 MachineInstr *MI = &*ri;
// Def path: only IMPLICIT_DEF defs should remain unrewritten here.
1334 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1335 "Register def was not rewritten?");
1336 RemoveMachineInstrFromMaps(MI);
1337 vrm.RemoveMachineInstrFromMaps(MI);
1338 MI->eraseFromParent();
1340 // This must be an use of an implicit_def so it's not part of the live
1341 // interval. Create a new empty live interval for it.
1342 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1343 unsigned NewVReg = mri_->createVirtualRegister(rc);
1345 vrm.setIsImplicitlyDefined(NewVReg);
1346 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this instruction that still names li.reg.
1347 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1348 MachineOperand &MO = MI->getOperand(i);
1349 if (MO.isReg() && MO.getReg() == li.reg)
/// addIntervalsForSpills - Top-level spill driver for one live interval.
/// Decides per-value-number rematerializability, assigns a stack slot if
/// needed, rewrites all defs/uses (splitting at MBB boundaries when
/// enabled), then emits spill and restore points -- folding them into
/// memory operands when possible -- and finally finishes kill markers and
/// spill weights on the new intervals.  NOTE(review): this excerpt is
/// missing many original lines (loop/branch closers, early returns);
/// comments below only describe what the visible lines establish.
1357 std::vector<LiveInterval*> LiveIntervals::
1358 addIntervalsForSpills(const LiveInterval &li,
1359 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1360 // Since this is called after the analysis is done we don't know if
1361 // LiveVariables is available
1362 lv_ = getAnalysisToUpdate<LiveVariables>();

1364 assert(li.weight != HUGE_VALF &&
1365 "attempt to spill already spilled interval!");

1367 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1368 li.print(DOUT, tri_);

1371 // Each bit specify whether it a spill is required in the MBB.
1372 BitVector SpillMBBs(mf_->getNumBlockIDs());
1373 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1374 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1375 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1376 std::map<unsigned,unsigned> MBBVRegsMap;
1377 std::vector<LiveInterval*> NewLIs;
1378 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

// Per-value-number rematerialization bookkeeping.
1380 unsigned NumValNums = li.getNumValNums();
1381 SmallVector<MachineInstr*, 4> ReMatDefs;
1382 ReMatDefs.resize(NumValNums, NULL);
1383 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1384 ReMatOrigDefs.resize(NumValNums, NULL);
1385 SmallVector<int, 4> ReMatIds;
1386 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1387 BitVector ReMatDelete(NumValNums);
1388 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

1390 // Spilling a split live interval. It cannot be split any further. Also,
1391 // it's also guaranteed to be a single val# / range interval.
1392 if (vrm.getPreSplitReg(li.reg)) {
1393 vrm.setIsSplitFromReg(li.reg, 0);
1394 // Unset the split kill marker on the last use.
1395 unsigned KillIdx = vrm.getKillPoint(li.reg);
1397 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1398 assert(KillMI && "Last use disappeared?");
1399 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1400 assert(KillOp != -1 && "Last use disappeared?");
1401 KillMI->getOperand(KillOp).setIsKill(false);
1403 vrm.removeKillPoint(li.reg);
1404 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1405 Slot = vrm.getStackSlot(li.reg);
1406 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1407 MachineInstr *ReMatDefMI = DefIsReMat ?
1408 vrm.getReMaterializedMI(li.reg) : NULL;
1410 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1411 bool isLoad = isLoadSS ||
1412 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1413 bool IsFirstRange = true;
1414 for (LiveInterval::Ranges::const_iterator
1415 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1416 // If this is a split live interval with multiple ranges, it means there
1417 // are two-address instructions that re-defined the value. Only the
1418 // first def can be rematerialized!
1420 // Note ReMatOrigDefMI has already been deleted.
1421 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1422 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1423 false, vrm, rc, ReMatIds, loopInfo,
1424 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1425 MBBVRegsMap, NewLIs);
// Non-first ranges of a split interval: plain spill, no remat.
1427 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1428 Slot, 0, false, false, false,
1429 false, vrm, rc, ReMatIds, loopInfo,
1430 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1431 MBBVRegsMap, NewLIs);
1433 IsFirstRange = false;

1436 handleSpilledImpDefs(li, vrm, rc, NewLIs);

// Normal (non-pre-split) path: decide whether to split at MBB boundaries.
1440 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1441 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)

// Determine, per value number, whether the def can be rematerialized and
// whether a stack slot will still be needed.
1445 bool NeedStackSlot = false;
1446 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1448 const VNInfo *VNI = *i;
1449 unsigned VN = VNI->id;
1450 unsigned DefIdx = VNI->def;
1452 continue; // Dead val#.
1453 // Is the def for the val# rematerializable?
1454 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1455 ? 0 : getInstructionFromIndex(DefIdx);
1457 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1458 // Remember how to remat the def of this val#.
1459 ReMatOrigDefs[VN] = ReMatDefMI;
1460 // Original def may be modified so we have to make a copy here. vrm must
1462 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1464 bool CanDelete = true;
1465 if (VNI->hasPHIKill) {
1466 // A kill is a phi node, not all of its uses can be rematerialized.
1467 // It must not be deleted.
1469 // Need a stack slot if there is any live range where uses cannot be
1471 NeedStackSlot = true;
1474 ReMatDelete.set(VN);
1476 // Need a stack slot if there is any live range where uses cannot be
1478 NeedStackSlot = true;

1482 // One stack slot per live interval.
1483 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1484 Slot = vrm.assignVirt2StackSlot(li.reg);

1486 // Create new intervals and rewrite defs and uses.
1487 for (LiveInterval::Ranges::const_iterator
1488 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1489 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1490 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1491 bool DefIsReMat = ReMatDefMI != NULL;
1492 bool CanDelete = ReMatDelete[I->valno->id];
1494 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1495 bool isLoad = isLoadSS ||
1496 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1497 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1498 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1499 CanDelete, vrm, rc, ReMatIds, loopInfo,
1500 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1501 MBBVRegsMap, NewLIs);

1504 // Insert spills / restores if we are splitting.
1506 handleSpilledImpDefs(li, vrm, rc, NewLIs);

1510 SmallPtrSet<LiveInterval*, 4> AddedKill;
1511 SmallVector<unsigned, 2> Ops;
// Emit spill points: iterate over MBBs flagged in SpillMBBs.
1512 if (NeedStackSlot) {
1513 int Id = SpillMBBs.find_first();
1515 std::vector<SRInfo> &spills = SpillIdxes[Id];
1516 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1517 int index = spills[i].index;
1518 unsigned VReg = spills[i].vreg;
1519 LiveInterval &nI = getOrCreateInterval(VReg);
1520 bool isReMat = vrm.isReMaterialized(VReg);
1521 MachineInstr *MI = getInstructionFromIndex(index);
1522 bool CanFold = false;
1523 bool FoundUse = false;
1525 if (spills[i].canFold) {
// Collect the operand indices of VReg in MI for folding.
1527 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1528 MachineOperand &MO = MI->getOperand(j);
1529 if (!MO.isRegister() || MO.getReg() != VReg)
1536 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1537 RestoreMBBs, RestoreIdxes))) {
1538 // MI has two-address uses of the same register. If the use
1539 // isn't the first and only use in the BB, then we can't fold
1540 // it. FIXME: Move this to rewriteInstructionsForSpills.
1547 // Fold the store into the def if possible.
1548 bool Folded = false;
1549 if (CanFold && !Ops.empty()) {
1550 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1553 // Also folded uses, do not issue a load.
1554 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1555 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1557 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1561 // Otherwise tell the spiller to issue a spill.
1563 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1564 bool isKill = LR->end == getStoreIndex(index);
1565 if (!MI->registerDefIsDead(nI.reg))
1566 // No need to spill a dead def.
1567 vrm.addSpillPoint(VReg, isKill, MI);
1569 AddedKill.insert(&nI);
1572 Id = SpillMBBs.find_next(Id);

// Emit restore points: iterate over MBBs flagged in RestoreMBBs.
1576 int Id = RestoreMBBs.find_first();
1578 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1579 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1580 int index = restores[i].index;
1583 unsigned VReg = restores[i].vreg;
1584 LiveInterval &nI = getOrCreateInterval(VReg);
1585 MachineInstr *MI = getInstructionFromIndex(index);
1586 bool CanFold = false;
1588 if (restores[i].canFold) {
1590 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1591 MachineOperand &MO = MI->getOperand(j);
1592 if (!MO.isRegister() || MO.getReg() != VReg)
1596 // If this restore were to be folded, it would have been folded
1605 // Fold the load into the use if possible.
1606 bool Folded = false;
1607 if (CanFold && !Ops.empty()) {
1608 if (!vrm.isReMaterialized(VReg))
1609 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1611 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1613 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1614 // If the rematerializable def is a load, also try to fold it.
1615 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1616 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1617 Ops, isLoadSS, LdSlot, VReg);
1618 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1620 // Re-matting an instruction with virtual register use. Add the
1621 // register as an implicit use on the use MI and update the register
1622 // interval's spill weight to HUGE_VALF to prevent it from being
1624 LiveInterval &ImpLi = getInterval(ImpUse);
1625 ImpLi.weight = HUGE_VALF;
1626 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

1630 // If folding is not possible / failed, then tell the spiller to issue a
1631 // load / rematerialization for us.
1633 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1635 vrm.addRestorePoint(VReg, MI);
1637 Id = RestoreMBBs.find_next(Id);

1640 // Finalize intervals: add kills, finalize spill weights, and filter out
1642 std::vector<LiveInterval*> RetNewLIs;
1643 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1644 LiveInterval *LI = NewLIs[i];
// Normalize weight by interval size before handing back to the allocator.
1646 LI->weight /= LI->getSize();
1647 if (!AddedKill.count(LI)) {
1648 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1649 unsigned LastUseIdx = getBaseIndex(LR->end);
1650 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1651 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1652 assert(UseIdx != -1);
1653 if (LastUse->getOperand(UseIdx).isImplicit() ||
1654 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1655 LastUse->getOperand(UseIdx).setIsKill();
1656 vrm.addKillPoint(LI->reg, LastUseIdx);
1659 RetNewLIs.push_back(LI);

1663 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1667 /// hasAllocatableSuperReg - Return true if the specified physical register has
1668 /// any super register that's allocatable.
// Also requires the super-register to have a live interval recorded here;
// the return statements are on lines elided from this excerpt.
1669 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1670 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1671 if (allocatableRegs_[*AS] && hasInterval(*AS))
1676 /// getRepresentativeReg - Find the largest super register of the specified
1677 /// physical register.
// Starts from Reg itself and climbs the super-register chain; a candidate
// is chosen when it has an interval but no allocatable super-register of
// its own (i.e. it is the top of its allocatable chain).  The update of
// BestReg and the return are on lines elided from this excerpt.
1678 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1679 // Find the largest super-register that is allocatable.
1680 unsigned BestReg = Reg;
1681 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1682 unsigned SuperReg = *AS;
1683 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1691 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1692 /// specified interval that conflicts with the specified physical register.
// A conflict is counted when the representative interval of PhysReg is
// live at the index of an instruction that uses/defines li.reg; the
// increment of NumConflicts is on a line elided from this excerpt.
1693 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1694 unsigned PhysReg) const {
1695 unsigned NumConflicts = 0;
1696 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1697 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1698 E = mri_->reg_end(); I != E; ++I) {
1699 MachineOperand &O = I.getOperand();
1700 MachineInstr *MI = O.getParent();
1701 unsigned Index = getInstructionIndex(MI);
1702 if (pli.liveAt(Index))
1705 return NumConflicts;
1708 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1709 /// around all defs and uses of the specified interval.
1710 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1711 unsigned PhysReg, VirtRegMap &vrm) {
1712 unsigned SpillReg = getRepresentativeReg(PhysReg);
1714 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1715 // If there are registers which alias PhysReg, but which are not a
1716 // sub-register of the chosen representative super register. Assert
1717 // since we can't handle it yet.
1718 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1719 tri_->isSuperRegister(*AS, SpillReg));
1721 LiveInterval &pli = getInterval(SpillReg);
1722 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1723 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1724 E = mri_->reg_end(); I != E; ++I) {
1725 MachineOperand &O = I.getOperand();
1726 MachineInstr *MI = O.getParent();
1727 if (SeenMIs.count(MI))
1730 unsigned Index = getInstructionIndex(MI);
1731 if (pli.liveAt(Index)) {
1732 vrm.addEmergencySpill(SpillReg, MI);
1733 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1734 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1735 if (!hasInterval(*AS))
1737 LiveInterval &spli = getInterval(*AS);
1738 if (spli.liveAt(Index))
1739 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);